xref: /linux-6.15/include/linux/rcupdate.h (revision e756bc56)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright IBM Corporation, 2001
19  *
20  * Author: Dipankar Sarma <[email protected]>
21  *
22  * Based on the original work by Paul McKenney <[email protected]>
23  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24  * Papers:
25  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27  *
28  * For detailed explanation of Read-Copy Update mechanism see -
29  *		http://lse.sourceforge.net/locking/rcupdate.html
30  *
31  */
32 
33 #ifndef __LINUX_RCUPDATE_H
34 #define __LINUX_RCUPDATE_H
35 
36 #include <linux/types.h>
37 #include <linux/cache.h>
38 #include <linux/spinlock.h>
39 #include <linux/threads.h>
40 #include <linux/cpumask.h>
41 #include <linux/seqlock.h>
42 #include <linux/lockdep.h>
43 #include <linux/completion.h>
44 #include <linux/debugobjects.h>
45 #include <linux/bug.h>
46 #include <linux/compiler.h>
47 
48 #ifdef CONFIG_RCU_TORTURE_TEST
49 extern int rcutorture_runnable; /* for sysctl */
50 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
51 
52 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
53 extern void rcutorture_record_test_transition(void);
54 extern void rcutorture_record_progress(unsigned long vernum);
55 extern void do_trace_rcu_torture_read(const char *rcutorturename,
56 				      struct rcu_head *rhp,
57 				      unsigned long secs,
58 				      unsigned long c_old,
59 				      unsigned long c);
60 #else
61 static inline void rcutorture_record_test_transition(void)
62 {
63 }
64 static inline void rcutorture_record_progress(unsigned long vernum)
65 {
66 }
67 #ifdef CONFIG_RCU_TRACE
68 extern void do_trace_rcu_torture_read(const char *rcutorturename,
69 				      struct rcu_head *rhp,
70 				      unsigned long secs,
71 				      unsigned long c_old,
72 				      unsigned long c);
73 #else
74 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
75 	do { } while (0)
76 #endif
77 #endif
78 
79 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
80 #define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
81 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
82 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
83 #define ulong2long(a)		(*(long *)(&(a)))
84 
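/*
 * Editorial note: the _CMP_ helpers above compare free-running counters
 * (for example, grace-period numbers) in a wraparound-safe way, assuming
 * the two values are at most half the counter space apart.  A sketch
 * with a hypothetical counter:
 *
 *	unsigned long snap = some_gp_counter;
 *	...
 *	if (ULONG_CMP_LT(snap, some_gp_counter))
 *		pr_info("counter advanced since the snapshot\n");
 */
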
85 /* Exported common interfaces */
86 
87 #ifdef CONFIG_PREEMPT_RCU
88 
89 /**
90  * call_rcu() - Queue an RCU callback for invocation after a grace period.
91  * @head: structure to be used for queueing the RCU updates.
92  * @func: actual callback function to be invoked after the grace period
93  *
94  * The callback function will be invoked some time after a full grace
95  * period elapses, in other words after all pre-existing RCU read-side
96  * critical sections have completed.  However, the callback function
97  * might well execute concurrently with RCU read-side critical sections
98  * that started after call_rcu() was invoked.  RCU read-side critical
99  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
100  * and may be nested.
101  *
102  * Note that all CPUs must agree that the grace period extended beyond
103  * all pre-existing RCU read-side critical sections.  On systems with more
104  * than one CPU, this means that when "func()" is invoked, each CPU is
105  * guaranteed to have executed a full memory barrier since the end of its
106  * last RCU read-side critical section whose beginning preceded the call
107  * to call_rcu().  It also means that each CPU executing an RCU read-side
108  * critical section that continues beyond the start of "func()" must have
109  * executed a memory barrier after the call_rcu() but before the beginning
110  * of that RCU read-side critical section.  Note that these guarantees
111  * include CPUs that are offline, idle, or executing in user mode, as
112  * well as CPUs that are executing in the kernel.
113  *
114  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
115  * resulting RCU callback function "func()", then both CPU A and CPU B are
116  * guaranteed to execute a full memory barrier during the time interval
117  * between the call to call_rcu() and the invocation of "func()" -- even
118  * if CPU A and CPU B are the same CPU (but again only if the system has
119  * more than one CPU).
120  */
121 extern void call_rcu(struct rcu_head *head,
122 			      void (*func)(struct rcu_head *head));
123 
124 #else /* #ifdef CONFIG_PREEMPT_RCU */
125 
126 /* In classic RCU, call_rcu() is just call_rcu_sched(). */
127 #define	call_rcu	call_rcu_sched
128 
129 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
130 
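/*
 * A minimal update-side sketch (editorial; struct foo, gbl_foo, foo_mutex,
 * foo_reclaim() and foo_update() are hypothetical): the updater unlinks the
 * old element and lets call_rcu() free it once pre-existing readers finish.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_update(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_mutex);
 *		old_fp = rcu_dereference_protected(gbl_foo,
 *						   lockdep_is_held(&foo_mutex));
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_mutex);
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 */
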
131 /**
132  * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
133  * @head: structure to be used for queueing the RCU updates.
134  * @func: actual callback function to be invoked after the grace period
135  *
136  * The callback function will be invoked some time after a full grace
137  * period elapses, in other words after all currently executing RCU
138  * read-side critical sections have completed. call_rcu_bh() assumes
139  * that the read-side critical sections end on completion of a softirq
140  * handler. This means that read-side critical sections in process
141  * context must not be interrupted by softirqs. This interface is to be
142  * used when most of the read-side critical sections are in softirq context.
143  * RCU read-side critical sections are delimited by:
144  *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
145  *  OR
146  *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
147  *  These may be nested.
148  *
149  * See the description of call_rcu() for more detailed information on
150  * memory ordering guarantees.
151  */
152 extern void call_rcu_bh(struct rcu_head *head,
153 			void (*func)(struct rcu_head *head));
154 
155 /**
156  * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
157  * @head: structure to be used for queueing the RCU updates.
158  * @func: actual callback function to be invoked after the grace period
159  *
160  * The callback function will be invoked some time after a full grace
161  * period elapses, in other words after all currently executing RCU
162  * read-side critical sections have completed. call_rcu_sched() assumes
163  * that the read-side critical sections end on enabling of preemption
164  * or on voluntary preemption.
165  * RCU read-side critical sections are delimited by:
166  *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
167  *  OR
168  *  - anything that disables preemption.
169  *  These may be nested.
170  *
171  * See the description of call_rcu() for more detailed information on
172  * memory ordering guarantees.
173  */
174 extern void call_rcu_sched(struct rcu_head *head,
175 			   void (*func)(struct rcu_head *rcu));
176 
177 extern void synchronize_sched(void);
178 
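/*
 * A blocking-updater sketch for the sched flavor (editorial; gbl_ptr,
 * do_something(), unhook_foo() and foo are hypothetical): readers run with
 * preemption disabled, and synchronize_sched() waits for all such readers
 * that are already running before the updater frees the old data.
 *
 *	Reader:
 *		preempt_disable();
 *		do_something(rcu_dereference_sched(gbl_ptr));
 *		preempt_enable();
 *
 *	Updater:
 *		unhook_foo();			(make the item unreachable)
 *		synchronize_sched();		(wait for pre-existing readers)
 *		kfree(foo);			(now safe to free)
 */
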
179 #ifdef CONFIG_PREEMPT_RCU
180 
181 extern void __rcu_read_lock(void);
182 extern void __rcu_read_unlock(void);
183 extern void rcu_read_unlock_special(struct task_struct *t);
184 void synchronize_rcu(void);
185 
186 /*
187  * Defined as a macro as it is a very low level header included from
188  * areas that don't even know about current.  This gives the rcu_read_lock()
189  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
190  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
191  */
192 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
193 
194 #else /* #ifdef CONFIG_PREEMPT_RCU */
195 
196 static inline void __rcu_read_lock(void)
197 {
198 	preempt_disable();
199 }
200 
201 static inline void __rcu_read_unlock(void)
202 {
203 	preempt_enable();
204 }
205 
206 static inline void synchronize_rcu(void)
207 {
208 	synchronize_sched();
209 }
210 
211 static inline int rcu_preempt_depth(void)
212 {
213 	return 0;
214 }
215 
216 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
217 
218 /* Internal to kernel */
219 extern void rcu_init(void);
220 extern void rcu_sched_qs(int cpu);
221 extern void rcu_bh_qs(int cpu);
222 extern void rcu_check_callbacks(int cpu, int user);
223 struct notifier_block;
224 extern void rcu_idle_enter(void);
225 extern void rcu_idle_exit(void);
226 extern void rcu_irq_enter(void);
227 extern void rcu_irq_exit(void);
228 
229 #ifdef CONFIG_RCU_USER_QS
230 extern void rcu_user_enter(void);
231 extern void rcu_user_exit(void);
232 #else
233 static inline void rcu_user_enter(void) { }
234 static inline void rcu_user_exit(void) { }
235 static inline void rcu_user_hooks_switch(struct task_struct *prev,
236 					 struct task_struct *next) { }
237 #endif /* CONFIG_RCU_USER_QS */
238 
239 /**
240  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
241  * @a: Code that RCU needs to pay attention to.
242  *
243  * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
244  * in the inner idle loop, that is, between the rcu_idle_enter() and
245  * the rcu_idle_exit() -- RCU will happily ignore any such read-side
246  * critical sections.  However, things like powertop need tracepoints
247  * in the inner idle loop.
248  *
249  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
250  * will tell RCU that it needs to pay attention, invoke its argument
251  * (in this example, a call to the do_something_with_RCU() function),
252  * and then tell RCU to go back to ignoring this CPU.  It is permissible
253  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
254  * quite limited.  If deeper nesting is required, it will be necessary
255  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
256  */
257 #define RCU_NONIDLE(a) \
258 	do { \
259 		rcu_irq_enter(); \
260 		do { a; } while (0); \
261 		rcu_irq_exit(); \
262 	} while (0)
263 
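/*
 * Usage sketch (editorial; trace_cpu_idle_event() is a hypothetical
 * tracepoint): code running in the RCU-idle window wraps its RCU-using
 * statement so that RCU watches the CPU for just that statement.
 *
 *	RCU_NONIDLE(trace_cpu_idle_event(state));
 */
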
264 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
265 extern bool __rcu_is_watching(void);
266 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
267 
268 /*
269  * Infrastructure to implement the synchronize_() primitives in
270  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
271  */
272 
273 typedef void call_rcu_func_t(struct rcu_head *head,
274 			     void (*func)(struct rcu_head *head));
275 void wait_rcu_gp(call_rcu_func_t crf);
276 
277 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
278 #include <linux/rcutree.h>
279 #elif defined(CONFIG_TINY_RCU)
280 #include <linux/rcutiny.h>
281 #else
282 #error "Unknown RCU implementation specified to kernel configuration"
283 #endif
284 
285 /*
286  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
287  * initialization and destruction of rcu_head on the stack. rcu_head structures
288  * allocated dynamically in the heap or defined statically don't need any
289  * initialization.
290  */
291 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
292 extern void init_rcu_head_on_stack(struct rcu_head *head);
293 extern void destroy_rcu_head_on_stack(struct rcu_head *head);
294 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
295 static inline void init_rcu_head_on_stack(struct rcu_head *head)
296 {
297 }
298 
299 static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
300 {
301 }
302 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
303 
304 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
305 bool rcu_lockdep_current_cpu_online(void);
306 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
307 static inline bool rcu_lockdep_current_cpu_online(void)
308 {
309 	return true;
310 }
311 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
312 
313 #ifdef CONFIG_DEBUG_LOCK_ALLOC
314 
315 static inline void rcu_lock_acquire(struct lockdep_map *map)
316 {
317 	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
318 }
319 
320 static inline void rcu_lock_release(struct lockdep_map *map)
321 {
322 	lock_release(map, 1, _THIS_IP_);
323 }
324 
325 extern struct lockdep_map rcu_lock_map;
326 extern struct lockdep_map rcu_bh_lock_map;
327 extern struct lockdep_map rcu_sched_lock_map;
328 extern int debug_lockdep_rcu_enabled(void);
329 
330 /**
331  * rcu_read_lock_held() - might we be in RCU read-side critical section?
332  *
333  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
334  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
335  * this assumes we are in an RCU read-side critical section unless it can
336  * prove otherwise.  This is useful for debug checks in functions that
337  * require that they be called within an RCU read-side critical section.
338  *
339  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
340  * and while lockdep is disabled.
341  *
342  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
343  * occur in the same context, for example, it is illegal to invoke
344  * rcu_read_unlock() in process context if the matching rcu_read_lock()
345  * was invoked from within an irq handler.
346  *
347  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
348  * offline from an RCU perspective, so check for those as well.
349  */
350 static inline int rcu_read_lock_held(void)
351 {
352 	if (!debug_lockdep_rcu_enabled())
353 		return 1;
354 	if (!rcu_is_watching())
355 		return 0;
356 	if (!rcu_lockdep_current_cpu_online())
357 		return 0;
358 	return lock_is_held(&rcu_lock_map);
359 }
360 
361 /*
362  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
363  * hell.
364  */
365 extern int rcu_read_lock_bh_held(void);
366 
367 /**
368  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
369  *
370  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
371  * RCU-sched read-side critical section.  In absence of
372  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
373  * critical section unless it can prove otherwise.  Note that disabling
374  * of preemption (including disabling irqs) counts as an RCU-sched
375  * read-side critical section.  This is useful for debug checks in functions
376  * that require that they be called within an RCU-sched read-side
377  * critical section.
378  *
379  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
380  * and while lockdep is disabled.
381  *
382  * Note that if the CPU is in the idle loop from an RCU point of
383  * view (i.e., we are in the section between rcu_idle_enter() and
384  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
385  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
386  * that are in such a section, considering them to be in an extended quiescent
387  * state, so such a CPU is effectively never in an RCU read-side critical
388  * section regardless of what RCU primitives it invokes.  This state of
389  * affairs is required --- we need to keep an RCU-free window in idle
390  * where the CPU may possibly enter into low power mode. This way we can
391  * report an extended quiescent state to other CPUs that started a grace
392  * period. Otherwise we would delay any grace period as long as we run in
393  * the idle task.
394  *
395  * Similarly, we avoid claiming an RCU-sched read lock held if the current
396  * CPU is offline.
397  */
398 #ifdef CONFIG_PREEMPT_COUNT
399 static inline int rcu_read_lock_sched_held(void)
400 {
401 	int lockdep_opinion = 0;
402 
403 	if (!debug_lockdep_rcu_enabled())
404 		return 1;
405 	if (!rcu_is_watching())
406 		return 0;
407 	if (!rcu_lockdep_current_cpu_online())
408 		return 0;
409 	if (debug_locks)
410 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
411 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
412 }
413 #else /* #ifdef CONFIG_PREEMPT_COUNT */
414 static inline int rcu_read_lock_sched_held(void)
415 {
416 	return 1;
417 }
418 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
419 
420 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
421 
422 # define rcu_lock_acquire(a)		do { } while (0)
423 # define rcu_lock_release(a)		do { } while (0)
424 
425 static inline int rcu_read_lock_held(void)
426 {
427 	return 1;
428 }
429 
430 static inline int rcu_read_lock_bh_held(void)
431 {
432 	return 1;
433 }
434 
435 #ifdef CONFIG_PREEMPT_COUNT
436 static inline int rcu_read_lock_sched_held(void)
437 {
438 	return preempt_count() != 0 || irqs_disabled();
439 }
440 #else /* #ifdef CONFIG_PREEMPT_COUNT */
441 static inline int rcu_read_lock_sched_held(void)
442 {
443 	return 1;
444 }
445 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
446 
447 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
448 
449 #ifdef CONFIG_PROVE_RCU
450 
451 extern int rcu_my_thread_group_empty(void);
452 
453 /**
454  * rcu_lockdep_assert - emit lockdep splat if specified condition not met
455  * @c: condition to check
456  * @s: informative message
457  */
458 #define rcu_lockdep_assert(c, s)					\
459 	do {								\
460 		static bool __section(.data.unlikely) __warned;		\
461 		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
462 			__warned = true;				\
463 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
464 		}							\
465 	} while (0)
466 
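/*
 * Usage sketch (editorial; my_lock is hypothetical): a function that must
 * run either under rcu_read_lock() or with my_lock held can document and
 * check that requirement as follows.
 *
 *	rcu_lockdep_assert(rcu_read_lock_held() ||
 *			   lockdep_is_held(&my_lock),
 *			   "need rcu_read_lock() or my_lock");
 */
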
467 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
468 static inline void rcu_preempt_sleep_check(void)
469 {
470 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
471 			   "Illegal context switch in RCU read-side critical section");
472 }
473 #else /* #ifdef CONFIG_PROVE_RCU */
474 static inline void rcu_preempt_sleep_check(void)
475 {
476 }
477 #endif /* #else #ifdef CONFIG_PROVE_RCU */
478 
479 #define rcu_sleep_check()						\
480 	do {								\
481 		rcu_preempt_sleep_check();				\
482 		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
483 				   "Illegal context switch in RCU-bh"	\
484 				   " read-side critical section");	\
485 		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
486 				   "Illegal context switch in RCU-sched"\
487 				   " read-side critical section");	\
488 	} while (0)
489 
490 #else /* #ifdef CONFIG_PROVE_RCU */
491 
492 #define rcu_lockdep_assert(c, s) do { } while (0)
493 #define rcu_sleep_check() do { } while (0)
494 
495 #endif /* #else #ifdef CONFIG_PROVE_RCU */
496 
497 /*
498  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
499  * and rcu_assign_pointer().  Some of these could be folded into their
500  * callers, but they are left separate in order to ease introduction of
501  * multiple flavors of pointers to match the multiple flavors of RCU
502  * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
503  * the future.
504  */
505 
506 #ifdef __CHECKER__
507 #define rcu_dereference_sparse(p, space) \
508 	((void)(((typeof(*p) space *)p) == p))
509 #else /* #ifdef __CHECKER__ */
510 #define rcu_dereference_sparse(p, space)
511 #endif /* #else #ifdef __CHECKER__ */
512 
513 #define __rcu_access_pointer(p, space) \
514 	({ \
515 		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
516 		rcu_dereference_sparse(p, space); \
517 		((typeof(*p) __force __kernel *)(_________p1)); \
518 	})
519 #define __rcu_dereference_check(p, c, space) \
520 	({ \
521 		typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
522 		rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
523 				      " usage"); \
524 		rcu_dereference_sparse(p, space); \
525 		smp_read_barrier_depends(); \
526 		((typeof(*p) __force __kernel *)(_________p1)); \
527 	})
528 #define __rcu_dereference_protected(p, c, space) \
529 	({ \
530 		rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
531 				      " usage"); \
532 		rcu_dereference_sparse(p, space); \
533 		((typeof(*p) __force __kernel *)(p)); \
534 	})
535 
536 #define __rcu_access_index(p, space) \
537 	({ \
538 		typeof(p) _________p1 = ACCESS_ONCE(p); \
539 		rcu_dereference_sparse(p, space); \
540 		(_________p1); \
541 	})
542 #define __rcu_dereference_index_check(p, c) \
543 	({ \
544 		typeof(p) _________p1 = ACCESS_ONCE(p); \
545 		rcu_lockdep_assert(c, \
546 				   "suspicious rcu_dereference_index_check()" \
547 				   " usage"); \
548 		smp_read_barrier_depends(); \
549 		(_________p1); \
550 	})
551 #define __rcu_assign_pointer(p, v, space) \
552 	do { \
553 		smp_wmb(); \
554 		(p) = (typeof(*v) __force space *)(v); \
555 	} while (0)
556 
557 
558 /**
559  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
560  * @p: The pointer to read
561  *
562  * Return the value of the specified RCU-protected pointer, but omit the
563  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
564  * when the value of this pointer is accessed, but the pointer is not
565  * dereferenced, for example, when testing an RCU-protected pointer against
566  * NULL.  Although rcu_access_pointer() may also be used in cases where
567  * update-side locks prevent the value of the pointer from changing, you
568  * should instead use rcu_dereference_protected() for this use case.
569  *
570  * It is also permissible to use rcu_access_pointer() when read-side
571  * access to the pointer was removed at least one grace period ago, as
572  * is the case in the context of the RCU callback that is freeing up
573  * the data, or after a synchronize_rcu() returns.  This can be useful
574  * when tearing down multi-linked structures after a grace period
575  * has elapsed.
576  */
577 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
578 
579 /**
580  * rcu_dereference_check() - rcu_dereference with debug checking
581  * @p: The pointer to read, prior to dereferencing
582  * @c: The conditions under which the dereference will take place
583  *
584  * Do an rcu_dereference(), but check that the conditions under which the
585  * dereference will take place are correct.  Typically the conditions
586  * indicate the various locking conditions that should be held at that
587  * point.  The check should return true if the conditions are satisfied.
588  * An implicit check for being in an RCU read-side critical section
589  * (rcu_read_lock()) is included.
590  *
591  * For example:
592  *
593  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
594  *
595  * could be used to indicate to lockdep that foo->bar may only be dereferenced
596  * if either rcu_read_lock() is held, or that the lock required to replace
597  * the bar struct at foo->bar is held.
598  *
599  * Note that the list of conditions may also include indications of when a lock
600  * need not be held, for example during initialisation or destruction of the
601  * target struct:
602  *
603  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
604  *					      atomic_read(&foo->usage) == 0);
605  *
606  * Inserts memory barriers on architectures that require them
607  * (currently only the Alpha), prevents the compiler from refetching
608  * (and from merging fetches), and, more importantly, documents exactly
609  * which pointers are protected by RCU and checks that the pointer is
610  * annotated as __rcu.
611  */
612 #define rcu_dereference_check(p, c) \
613 	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
614 
615 /**
616  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
617  * @p: The pointer to read, prior to dereferencing
618  * @c: The conditions under which the dereference will take place
619  *
620  * This is the RCU-bh counterpart to rcu_dereference_check().
621  */
622 #define rcu_dereference_bh_check(p, c) \
623 	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
624 
625 /**
626  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
627  * @p: The pointer to read, prior to dereferencing
628  * @c: The conditions under which the dereference will take place
629  *
630  * This is the RCU-sched counterpart to rcu_dereference_check().
631  */
632 #define rcu_dereference_sched_check(p, c) \
633 	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
634 				__rcu)
635 
636 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
637 
638 /*
639  * The tracing infrastructure traces RCU (we want that), but unfortunately
640  * some of the RCU checks causes tracing to lock up the system.
641  *
642  * The tracing version of rcu_dereference_raw() must not call
643  * rcu_read_lock_held().
644  */
645 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
646 
647 /**
648  * rcu_access_index() - fetch RCU index with no dereferencing
649  * @p: The index to read
650  *
651  * Return the value of the specified RCU-protected index, but omit the
652  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
653  * when the value of this index is accessed, but the index is not
654  * dereferenced, for example, when testing an RCU-protected index against
655  * -1.  Although rcu_access_index() may also be used in cases where
656  * update-side locks prevent the value of the index from changing, you
657  * should instead use rcu_dereference_index_protected() for this use case.
658  */
659 #define rcu_access_index(p) __rcu_access_index((p), __rcu)
660 
661 /**
662  * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
663  * @p: The pointer to read, prior to dereferencing
664  * @c: The conditions under which the dereference will take place
665  *
666  * Similar to rcu_dereference_check(), but omits the sparse checking.
667  * This allows rcu_dereference_index_check() to be used on integers,
668  * which can then be used as array indices.  Attempting to use
669  * rcu_dereference_check() on an integer will give compiler warnings
670  * because the sparse address-space mechanism relies on dereferencing
671  * the RCU-protected pointer.  Dereferencing integers is not something
672  * that even gcc will put up with.
673  *
674  * Note that this function does not implicitly check for RCU read-side
675  * critical sections.  If this function gains lots of uses, it might
676  * make sense to provide versions for each flavor of RCU, but it does
677  * not make sense as of early 2010.
678  */
679 #define rcu_dereference_index_check(p, c) \
680 	__rcu_dereference_index_check((p), (c))
681 
682 /**
683  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
684  * @p: The pointer to read, prior to dereferencing
685  * @c: The conditions under which the dereference will take place
686  *
687  * Return the value of the specified RCU-protected pointer, but omit
688  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
689  * is useful in cases where update-side locks prevent the value of the
690  * pointer from changing.  Please note that this primitive does -not-
691  * prevent the compiler from repeating this reference or combining it
692  * with other references, so it should not be used without protection
693  * of appropriate locks.
694  *
695  * This function is only for update-side use.  Using this function
696  * when protected only by rcu_read_lock() will result in infrequent
697  * but very ugly failures.
698  */
699 #define rcu_dereference_protected(p, c) \
700 	__rcu_dereference_protected((p), (c), __rcu)
701 
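/*
 * Usage sketch (editorial; gbl_foo and foo_mutex are hypothetical): an
 * updater already holding the lock that guards gbl_foo can fetch the
 * pointer without read-side overhead while documenting the protection.
 *
 *	spin_lock(&foo_mutex);
 *	old = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_mutex));
 *	rcu_assign_pointer(gbl_foo, new);
 *	spin_unlock(&foo_mutex);
 *	(free "old" only after a grace period, e.g. via call_rcu())
 */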
702 
703 /**
704  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
705  * @p: The pointer to read, prior to dereferencing
706  *
707  * This is a simple wrapper around rcu_dereference_check().
708  */
709 #define rcu_dereference(p) rcu_dereference_check(p, 0)
710 
711 /**
712  * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
713  * @p: The pointer to read, prior to dereferencing
714  *
715  * Makes rcu_dereference_check() do the dirty work.
716  */
717 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
718 
719 /**
720  * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
721  * @p: The pointer to read, prior to dereferencing
722  *
723  * Makes rcu_dereference_check() do the dirty work.
724  */
725 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
726 
727 /**
728  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
729  *
730  * When synchronize_rcu() is invoked on one CPU while other CPUs
731  * are within RCU read-side critical sections, then the
732  * synchronize_rcu() is guaranteed to block until after all the other
733  * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
734  * on one CPU while other CPUs are within RCU read-side critical
735  * sections, invocation of the corresponding RCU callback is deferred
736  * until after all the other CPUs exit their critical sections.
737  *
738  * Note, however, that RCU callbacks are permitted to run concurrently
739  * with new RCU read-side critical sections.  One way that this can happen
740  * is via the following sequence of events: (1) CPU 0 enters an RCU
741  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
742  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
743  * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
744  * callback is invoked.  This is legal, because the RCU read-side critical
745  * section that was running concurrently with the call_rcu() (and which
746  * therefore might be referencing something that the corresponding RCU
747  * callback would free up) has completed before the corresponding
748  * RCU callback is invoked.
749  *
750  * RCU read-side critical sections may be nested.  Any deferred actions
751  * will be deferred until the outermost RCU read-side critical section
752  * completes.
753  *
754  * You can avoid reading and understanding the next paragraph by
755  * following this rule: don't put anything in an rcu_read_lock() RCU
756  * read-side critical section that would block in a !PREEMPT kernel.
757  * But if you want the full story, read on!
758  *
759  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
760  * is illegal to block while in an RCU read-side critical section.  In
761  * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
762  * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
763  * be preempted, but explicit blocking is illegal.  Finally, in preemptible
764  * RCU implementations in real-time (with -rt patchset) kernel builds,
765  * RCU read-side critical sections may be preempted and they may also
766  * block, but only when acquiring spinlocks that are subject to priority
767  * inheritance.
768  */
769 static inline void rcu_read_lock(void)
770 {
771 	__rcu_read_lock();
772 	__acquire(RCU);
773 	rcu_lock_acquire(&rcu_lock_map);
774 	rcu_lockdep_assert(rcu_is_watching(),
775 			   "rcu_read_lock() used illegally while idle");
776 }
777 
778 /*
779  * So where is rcu_write_lock()?  It does not exist, as there is no
780  * way for writers to lock out RCU readers.  This is a feature, not
781  * a bug -- this property is what provides RCU's performance benefits.
782  * Of course, writers must coordinate with each other.  The normal
783  * spinlock primitives work well for this, but any other technique may be
784  * used as well.  RCU does not care how the writers keep out of each
785  * others' way, as long as they do so.
786  */
787 
788 /**
789  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
790  *
791  * See rcu_read_lock() for more information.
792  */
793 static inline void rcu_read_unlock(void)
794 {
795 	rcu_lockdep_assert(rcu_is_watching(),
796 			   "rcu_read_unlock() used illegally while idle");
797 	rcu_lock_release(&rcu_lock_map);
798 	__release(RCU);
799 	__rcu_read_unlock();
800 }
801 
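/*
 * Read-side sketch (editorial; gbl_foo and do_something_with() are
 * hypothetical): the reader brackets its accesses with rcu_read_lock()
 * and rcu_read_unlock() and fetches the pointer with rcu_dereference().
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock();
 *
 * The pointer "p" must not be used after rcu_read_unlock() unless some
 * other protection, such as a reference count, has been acquired first.
 */
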
802 /**
803  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
804  *
805  * This is equivalent to rcu_read_lock(), but to be used when updates
806  * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
807  * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
808  * softirq handler to be a quiescent state, a process-context RCU read-side
809  * critical section must be protected by disabling softirqs. Read-side
810  * critical sections in interrupt context can use just rcu_read_lock(),
811  * though this should at least be commented to avoid confusing people
812  * reading the code.
813  *
814  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
815  * must occur in the same context, for example, it is illegal to invoke
816  * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
817  * was invoked from some other task.
818  */
819 static inline void rcu_read_lock_bh(void)
820 {
821 	local_bh_disable();
822 	__acquire(RCU_BH);
823 	rcu_lock_acquire(&rcu_bh_lock_map);
824 	rcu_lockdep_assert(rcu_is_watching(),
825 			   "rcu_read_lock_bh() used illegally while idle");
826 }
827 
828 /*
829  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
830  *
831  * See rcu_read_lock_bh() for more information.
832  */
833 static inline void rcu_read_unlock_bh(void)
834 {
835 	rcu_lockdep_assert(rcu_is_watching(),
836 			   "rcu_read_unlock_bh() used illegally while idle");
837 	rcu_lock_release(&rcu_bh_lock_map);
838 	__release(RCU_BH);
839 	local_bh_enable();
840 }
841 
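/*
 * Read-side sketch for the _bh flavor (editorial; table, hash and
 * handle_entry() are hypothetical): process-context readers use the
 * explicit calls below, while softirq handlers are protected implicitly.
 *
 *	rcu_read_lock_bh();
 *	e = rcu_dereference_bh(table[hash]);
 *	if (e)
 *		handle_entry(e);
 *	rcu_read_unlock_bh();
 */
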
842 /**
843  * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
844  *
845  * This is equivalent to rcu_read_lock(), but to be used when updates
846  * are being done using call_rcu_sched() or synchronize_rcu_sched().
847  * Read-side critical sections can also be introduced by anything that
848  * disables preemption, including local_irq_disable() and friends.
849  *
850  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
851  * must occur in the same context, for example, it is illegal to invoke
852  * rcu_read_unlock_sched() from process context if the matching
853  * rcu_read_lock_sched() was invoked from an NMI handler.
854  */
855 static inline void rcu_read_lock_sched(void)
856 {
857 	preempt_disable();
858 	__acquire(RCU_SCHED);
859 	rcu_lock_acquire(&rcu_sched_lock_map);
860 	rcu_lockdep_assert(rcu_is_watching(),
861 			   "rcu_read_lock_sched() used illegally while idle");
862 }
863 
864 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
865 static inline notrace void rcu_read_lock_sched_notrace(void)
866 {
867 	preempt_disable_notrace();
868 	__acquire(RCU_SCHED);
869 }
870 
871 /*
872  * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
873  *
874  * See rcu_read_lock_sched for more information.
875  */
876 static inline void rcu_read_unlock_sched(void)
877 {
878 	rcu_lockdep_assert(rcu_is_watching(),
879 			   "rcu_read_unlock_sched() used illegally while idle");
880 	rcu_lock_release(&rcu_sched_lock_map);
881 	__release(RCU_SCHED);
882 	preempt_enable();
883 }
884 
885 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
886 static inline notrace void rcu_read_unlock_sched_notrace(void)
887 {
888 	__release(RCU_SCHED);
889 	preempt_enable_notrace();
890 }
891 
892 /**
893  * rcu_assign_pointer() - assign to RCU-protected pointer
894  * @p: pointer to assign to
895  * @v: value to assign (publish)
896  *
897  * Assigns the specified value to the specified RCU-protected
898  * pointer, ensuring that any concurrent RCU readers will see
899  * any prior initialization.
900  *
901  * Inserts memory barriers on architectures that require them
902  * (which is most of them), and also prevents the compiler from
903  * reordering the code that initializes the structure after the pointer
904  * assignment.  More importantly, this call documents which pointers
905  * will be dereferenced by RCU read-side code.
906  *
907  * In some special cases, you may use RCU_INIT_POINTER() instead
908  * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
909  * to the fact that it does not constrain either the CPU or the compiler.
910  * That said, using RCU_INIT_POINTER() when you should have used
911  * rcu_assign_pointer() is a very bad thing that results in
912  * impossible-to-diagnose memory corruption.  So please be careful.
913  * See the RCU_INIT_POINTER() comment header for details.
914  */
915 #define rcu_assign_pointer(p, v) \
916 	__rcu_assign_pointer((p), (v), __rcu)
917 
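/*
 * Publish sketch (editorial; struct foo, gbl_foo and the field values are
 * hypothetical): fully initialize the structure, then publish it with
 * rcu_assign_pointer() so that readers can never see it half-initialized.
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	p->a = 1;
 *	p->b = 2;
 *	rcu_assign_pointer(gbl_foo, p);
 */
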
918 /**
919  * RCU_INIT_POINTER() - initialize an RCU protected pointer
920  *
921  * Initialize an RCU-protected pointer in special cases where readers
922  * do not need ordering constraints on the CPU or the compiler.  These
923  * special cases are:
924  *
925  * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
926  * 2.	The caller has taken whatever steps are required to prevent
927  *	RCU readers from concurrently accessing this pointer -or-
928  * 3.	The referenced data structure has already been exposed to
929  *	readers either at compile time or via rcu_assign_pointer() -and-
930  *	a.	You have not made -any- reader-visible changes to
931  *		this structure since then -or-
932  *	b.	It is OK for readers accessing this structure from its
933  *		new location to see the old state of the structure.  (For
934  *		example, the changes were to statistical counters or to
935  *		other state where exact synchronization is not required.)
936  *
937  * Failure to follow these rules governing use of RCU_INIT_POINTER() will
938  * result in impossible-to-diagnose memory corruption.  That is, the structures
939  * will look OK in crash dumps, but any concurrent RCU readers might
940  * see pre-initialization values of the referenced data structure.  So
941  * please be very careful how you use RCU_INIT_POINTER()!!!
942  *
943  * If you are creating an RCU-protected linked structure that is accessed
944  * by a single external-to-structure RCU-protected pointer, then you may
945  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
946  * pointers, but you must use rcu_assign_pointer() to initialize the
947  * external-to-structure pointer -after- you have completely initialized
948  * the reader-accessible portions of the linked structure.
949  */
950 #define RCU_INIT_POINTER(p, v) \
951 	do { \
952 		p = (typeof(*v) __force __rcu *)(v); \
953 	} while (0)
954 
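/*
 * Usage sketch (editorial; foo_init() and gbl_foo are hypothetical):
 * during single-threaded initialization, before any reader can possibly
 * reach gbl_foo, the cheaper RCU_INIT_POINTER() suffices.
 *
 *	static int __init foo_init(void)
 *	{
 *		RCU_INIT_POINTER(gbl_foo, NULL);
 *		return 0;
 *	}
 */
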
955 /**
956  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
957  *
958  * GCC-style initialization for an RCU-protected pointer in a structure field.
959  */
960 #define RCU_POINTER_INITIALIZER(p, v) \
961 		.p = (typeof(*v) __force __rcu *)(v)
962 
963 /*
964  * Does the specified offset indicate that the corresponding rcu_head
965  * structure can be handled by kfree_rcu()?
966  */
967 #define __is_kfree_rcu_offset(offset) ((offset) < 4096)
968 
969 /*
970  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
971  */
972 #define __kfree_rcu(head, offset) \
973 	do { \
974 		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
975 		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
976 	} while (0)
977 
978 /**
979  * kfree_rcu() - kfree an object after a grace period.
980  * @ptr:	pointer to kfree
981  * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
982  *
983  * Many rcu callback functions just call kfree() on the base structure.
984  * These functions are trivial, but their size adds up, and furthermore
985  * when they are used in a kernel module, that module must invoke the
986  * high-latency rcu_barrier() function at module-unload time.
987  *
988  * The kfree_rcu() function handles this issue.  Rather than encoding a
989  * function address in the embedded rcu_head structure, kfree_rcu() instead
990  * encodes the offset of the rcu_head structure within the base structure.
991  * Because the functions are not allowed in the low-order 4096 bytes of
992  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
993  * If the offset is larger than 4095 bytes, a compile-time error will
994  * be generated in __kfree_rcu().  If this error is triggered, you can
995  * either fall back to use of call_rcu() or rearrange the structure to
996  * position the rcu_head structure into the first 4096 bytes.
997  *
998  * Note that the allowable offset might decrease in the future, for example,
999  * to allow something like kmem_cache_free_rcu().
1000  *
1001  * The BUILD_BUG_ON check must not involve any function calls, hence the
1002  * checks are done in macros here.
1003  */
1004 #define kfree_rcu(ptr, rcu_head)					\
1005 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
1006 
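/*
 * Usage sketch (editorial; struct foo, gbl_foo and foo_lock are
 * hypothetical): when the RCU callback would only have called kfree(),
 * the offset-encoding form avoids writing a callback function at all.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	(under the update-side lock)
 *	old = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(gbl_foo, new);
 *	kfree_rcu(old, rcu);
 */
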
1007 #ifdef CONFIG_RCU_NOCB_CPU
1008 extern bool rcu_is_nocb_cpu(int cpu);
1009 #else
1010 static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
1011 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
1012 
1013 
1014 /* Only for use by adaptive-ticks code. */
1015 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
1016 extern bool rcu_sys_is_idle(void);
1017 extern void rcu_sysidle_force_exit(void);
1018 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
1019 
1020 static inline bool rcu_sys_is_idle(void)
1021 {
1022 	return false;
1023 }
1024 
1025 static inline void rcu_sysidle_force_exit(void)
1026 {
1027 }
1028 
1029 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
1030 
1031 
1032 #endif /* __LINUX_RCUPDATE_H */
1033