/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <[email protected]>
 *	   Lai Jiangshan <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>

struct srcu_struct;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key);

/*
 * Allocate a static lock class key per init_srcu_struct() call site so
 * that lockdep can tell different srcu_struct instances apart.
 */
#define init_srcu_struct(ssp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *ssp);

#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
#define SRCU_READ_FLAVOR_NORMAL	0x1		// srcu_read_lock().
#define SRCU_READ_FLAVOR_NMI	0x2		// srcu_read_lock_nmisafe().
#define SRCU_READ_FLAVOR_LITE	0x4		// srcu_read_lock_lite().
#define SRCU_READ_FLAVOR_ALL	(SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
				 SRCU_READ_FLAVOR_LITE)	// All of the above.
#define SRCU_READ_FLAVOR_SLOWGP	SRCU_READ_FLAVOR_LITE
						// Flavors requiring synchronize_rcu()
						// instead of smp_mb().
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);

#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
#else
#error "Unknown SRCU implementation specified to kernel configuration"
#endif

void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
		void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
void synchronize_srcu(struct srcu_struct *ssp);

#define SRCU_GET_STATE_COMPLETED 0x1

/**
 * get_completed_synchronize_srcu - Return a pre-completed polled state cookie
 *
 * Returns a value that poll_state_synchronize_srcu() will always treat
 * as a cookie whose grace period has already completed.
 */
static inline unsigned long get_completed_synchronize_srcu(void)
{
	return SRCU_GET_STATE_COMPLETED;
}

unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);

// Maximum number of unsigned long values corresponding to
// not-yet-completed SRCU grace periods.
#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2

/**
 * same_state_synchronize_srcu - Are two old-state values identical?
 * @oldstate1: First old-state value.
 * @oldstate2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
 * get_completed_synchronize_srcu().  Returns @true if the two values are
 * identical and @false otherwise.  This allows structures whose lifetimes
 * are tracked by old-state values to push these values to a list header,
 * allowing those structures to be slightly smaller.
 */
static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
{
	return oldstate1 == oldstate2;
}

#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
#else
/* When the architecture's plain SRCU readers are already NMI-safe, the
 * _nmisafe variants are simple aliases for the normal ones. */
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	__srcu_read_unlock(ssp, idx);
}
#endif /* CONFIG_NEED_SRCU_NMI_SAFE */

void srcu_init(void);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @ssp: The srcu_struct structure to check
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and it doesn't
 * rely on normal RCU, so it can be called from a CPU which, from an
 * RCU point of view, is in the idle loop or is offline.
 */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&ssp->dep_map);
}

/*
 * Annotations provide deadlock detection for SRCU.
 *
 * Similar to other lockdep annotations, except there is an additional
 * srcu_lock_sync(), which is basically an empty *write*-side critical section,
 * see lock_sync() for more information.
 */

/* Annotates a srcu_read_lock() */
static inline void srcu_lock_acquire(struct lockdep_map *map)
{
	lock_map_acquire_read(map);
}

/* Annotates a srcu_read_unlock() */
static inline void srcu_lock_release(struct lockdep_map *map)
{
	lock_map_release(map);
}

/* Annotates a synchronize_srcu() */
static inline void srcu_lock_sync(struct lockdep_map *map)
{
	lock_map_sync(map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Without lockdep, assume we are always in a read-side critical section. */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
	return 1;
}

#define srcu_lock_acquire(m) do { } while (0)
#define srcu_lock_release(m) do { } while (0)
#define srcu_lock_sync(m) do { } while (0)

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */


/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, ssp, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || srcu_read_lock_held(ssp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)

/**
 * srcu_dereference_notrace - no tracing and no lockdep calls from here
 * @p: the pointer to fetch and protect for later dereferencing
 * @ssp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 */
#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)

/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * The return value from srcu_read_lock() is guaranteed to be
 * non-negative.  This value must be passed unaltered to the matching
 * srcu_read_unlock().  Note that srcu_read_lock() and the matching
 * srcu_read_unlock() must occur in the same context, for example, it is
 * illegal to invoke srcu_read_unlock() in an irq handler if the matching
 * srcu_read_lock() was invoked in process context.  Or, for that matter to
 * invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
 * from another.
 */
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	retval = __srcu_read_lock(ssp);
	srcu_lock_acquire(&ssp->dep_map);
	return retval;
}

/**
 * srcu_read_lock_lite - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but for a light-weight
 * smp_mb()-free reader.  See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_lite() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after.  Note that grace-period auto-expediting is disabled for _lite
 * srcu_struct structures because auto-expedited grace periods invoke
 * synchronize_rcu_expedited(), IPIs and all.
 *
 * Note that srcu_read_lock_lite() can be invoked only from those contexts
 * where RCU is watching, that is, from contexts where it would be legal
 * to invoke rcu_read_lock().  Otherwise, lockdep will complain.
 */
static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_LITE);
	retval = __srcu_read_lock_lite(ssp);
	rcu_try_lock_acquire(&ssp->dep_map);
	return retval;
}

/**
 * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but in an NMI-safe manner.
 * See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after.
 */
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
	retval = __srcu_read_lock_nmisafe(ssp);
	rcu_try_lock_acquire(&ssp->dep_map);
	return retval;
}

/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
	int retval;

	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	retval = __srcu_read_lock(ssp);
	return retval;
}

/**
 * srcu_down_read - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter a semaphore-like SRCU read-side critical section.  Note that
 * SRCU read-side critical sections may be nested.  However, it is
 * illegal to call anything that waits on an SRCU grace period for the
 * same srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().  But if you want lockdep to help you
 * keep this stuff straight, you should instead use srcu_read_lock().
 *
 * The semaphore-like nature of srcu_down_read() means that the matching
 * srcu_up_read() can be invoked from some other context, for example,
 * from some other task or from an irq handler.  However, neither
 * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
 *
 * Calls to srcu_down_read() may be nested, similar to the manner in
 * which calls to down_read() may be nested.
 */
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
	WARN_ON_ONCE(in_nmi());
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	return __srcu_read_lock(ssp);
}

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	srcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock(ssp, idx);
}

/**
 * srcu_read_unlock_lite - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock_lite().
 *
 * Exit a light-weight SRCU read-side critical section.
 */
static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
	srcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock_lite(ssp, idx);
}

/**
 * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock_nmisafe().
 *
 * Exit an SRCU read-side critical section, but in an NMI-safe manner.
 */
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
	rcu_lock_release(&ssp->dep_map);
	__srcu_read_unlock_nmisafe(ssp, idx);
}

/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	__srcu_read_unlock(ssp, idx);
}

/**
 * srcu_up_read - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_down_read().
 *
 * Exit an SRCU read-side critical section, but not necessarily from
 * the same context as the matching srcu_down_read().
 */
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
	__releases(ssp)
{
	WARN_ON_ONCE(idx & ~0x1);
	WARN_ON_ONCE(in_nmi());
	srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
	__srcu_read_unlock(ssp, idx);
}

/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

/**
 * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock
 *
 * Converts the preceding srcu_read_lock into a two-way memory barrier.
 *
 * Call this after srcu_read_lock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_lock will appear to happen after
 * the preceding srcu_read_lock.
 */
static inline void smp_mb__after_srcu_read_lock(void)
{
	/* __srcu_read_lock has smp_mb() internally so nothing to do here. */
}

/* Scope-based guard: srcu_read_lock() on entry, srcu_read_unlock() on exit. */
DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
		    _T->idx = srcu_read_lock(_T->lock),
		    srcu_read_unlock(_T->lock, _T->idx),
		    int idx)

#endif /* _LINUX_SRCU_H */