xref: /linux-6.15/include/linux/rcupdate.h (revision f5e4e7fd)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright IBM Corporation, 2001
19  *
20  * Author: Dipankar Sarma <[email protected]>
21  *
22  * Based on the original work by Paul McKenney <[email protected]>
23  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24  * Papers:
25  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27  *
28  * For detailed explanation of Read-Copy Update mechanism see -
29  *		http://lse.sourceforge.net/locking/rcupdate.html
30  *
31  */
32 
33 #ifndef __LINUX_RCUPDATE_H
34 #define __LINUX_RCUPDATE_H
35 
36 #include <linux/types.h>
37 #include <linux/cache.h>
38 #include <linux/spinlock.h>
39 #include <linux/threads.h>
40 #include <linux/cpumask.h>
41 #include <linux/seqlock.h>
42 #include <linux/lockdep.h>
43 #include <linux/completion.h>
44 #include <linux/debugobjects.h>
45 #include <linux/bug.h>
46 #include <linux/compiler.h>
47 
48 #ifdef CONFIG_RCU_TORTURE_TEST
49 extern int rcutorture_runnable; /* for sysctl */
50 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
51 
52 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
53 extern void rcutorture_record_test_transition(void);
54 extern void rcutorture_record_progress(unsigned long vernum);
55 extern void do_trace_rcu_torture_read(char *rcutorturename,
56 				      struct rcu_head *rhp,
57 				      unsigned long secs,
58 				      unsigned long c_old,
59 				      unsigned long c);
60 #else
61 static inline void rcutorture_record_test_transition(void)
62 {
63 }
64 static inline void rcutorture_record_progress(unsigned long vernum)
65 {
66 }
67 #ifdef CONFIG_RCU_TRACE
68 extern void do_trace_rcu_torture_read(char *rcutorturename,
69 				      struct rcu_head *rhp,
70 				      unsigned long secs,
71 				      unsigned long c_old,
72 				      unsigned long c);
73 #else
74 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
75 	do { } while (0)
76 #endif
77 #endif
78 
79 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
80 #define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
81 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
82 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
83 #define ulong2long(a)		(*(long *)(&(a)))
84 
85 /* Exported common interfaces */
86 
87 #ifdef CONFIG_PREEMPT_RCU
88 
89 /**
90  * call_rcu() - Queue an RCU callback for invocation after a grace period.
91  * @head: structure to be used for queueing the RCU updates.
92  * @func: actual callback function to be invoked after the grace period
93  *
94  * The callback function will be invoked some time after a full grace
95  * period elapses, in other words after all pre-existing RCU read-side
96  * critical sections have completed.  However, the callback function
97  * might well execute concurrently with RCU read-side critical sections
98  * that started after call_rcu() was invoked.  RCU read-side critical
99  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
100  * and may be nested.
101  *
102  * Note that all CPUs must agree that the grace period extended beyond
103  * all pre-existing RCU read-side critical sections.  On systems with more
104  * than one CPU, this means that when "func()" is invoked, each CPU is
105  * guaranteed to have executed a full memory barrier since the end of its
106  * last RCU read-side critical section whose beginning preceded the call
107  * to call_rcu().  It also means that each CPU executing an RCU read-side
108  * critical section that continues beyond the start of "func()" must have
109  * executed a memory barrier after the call_rcu() but before the beginning
110  * of that RCU read-side critical section.  Note that these guarantees
111  * include CPUs that are offline, idle, or executing in user mode, as
112  * well as CPUs that are executing in the kernel.
113  *
114  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
115  * resulting RCU callback function "func()", then both CPU A and CPU B are
116  * guaranteed to execute a full memory barrier during the time interval
117  * between the call to call_rcu() and the invocation of "func()" -- even
118  * if CPU A and CPU B are the same CPU (but again only if the system has
119  * more than one CPU).
120  */
121 extern void call_rcu(struct rcu_head *head,
122 			      void (*func)(struct rcu_head *head));
123 
124 #else /* #ifdef CONFIG_PREEMPT_RCU */
125 
126 /* In classic RCU, call_rcu() is just call_rcu_sched(). */
127 #define	call_rcu	call_rcu_sched
128 
129 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
130 
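/*
 * Usage sketch (illustrative only, not part of this header): freeing a
 * dynamically allocated structure once all pre-existing readers are done.
 * The type "struct foo" and the helpers foo_reclaim() and foo_unlink()
 * are hypothetical names invented for this example.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_unlink(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 *
 * Readers traversing the list under rcu_read_lock() may still hold a
 * reference to *fp when foo_unlink() returns; kfree() runs only after
 * they have all completed.
 */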
131 /**
132  * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
133  * @head: structure to be used for queueing the RCU updates.
134  * @func: actual callback function to be invoked after the grace period
135  *
136  * The callback function will be invoked some time after a full grace
137  * period elapses, in other words after all currently executing RCU
138  * read-side critical sections have completed. call_rcu_bh() assumes
139  * that the read-side critical sections end on completion of a softirq
140  * handler. This means that read-side critical sections in process
141  * context must not be interrupted by softirqs. This interface is to be
142  * used when most of the read-side critical sections are in softirq context.
143  * RCU read-side critical sections are delimited by:
144  *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
145  *  OR
146  *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
147  *  These may be nested.
148  *
149  * See the description of call_rcu() for more detailed information on
150  * memory ordering guarantees.
151  */
152 extern void call_rcu_bh(struct rcu_head *head,
153 			void (*func)(struct rcu_head *head));
154 
155 /**
156  * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
157  * @head: structure to be used for queueing the RCU updates.
158  * @func: actual callback function to be invoked after the grace period
159  *
160  * The callback function will be invoked some time after a full grace
161  * period elapses, in other words after all currently executing RCU
162  * read-side critical sections have completed. call_rcu_sched() assumes
163  * that the read-side critical sections end on enabling of preemption
164  * or on voluntary preemption.
165  * RCU read-side critical sections are delimited by:
166  *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
167  *  OR
168  *  anything that disables preemption.
169  *  These may be nested.
170  *
171  * See the description of call_rcu() for more detailed information on
172  * memory ordering guarantees.
173  */
174 extern void call_rcu_sched(struct rcu_head *head,
175 			   void (*func)(struct rcu_head *rcu));
176 
177 extern void synchronize_sched(void);
178 
179 #ifdef CONFIG_PREEMPT_RCU
180 
181 extern void __rcu_read_lock(void);
182 extern void __rcu_read_unlock(void);
183 extern void rcu_read_unlock_special(struct task_struct *t);
184 void synchronize_rcu(void);
185 
186 /*
187  * Defined as a macro as it is a very low level header included from
188  * areas that don't even know about current.  This gives the rcu_read_lock()
189  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
190  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
191  */
192 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
193 
194 #else /* #ifdef CONFIG_PREEMPT_RCU */
195 
196 static inline void __rcu_read_lock(void)
197 {
198 	preempt_disable();
199 }
200 
201 static inline void __rcu_read_unlock(void)
202 {
203 	preempt_enable();
204 }
205 
206 static inline void synchronize_rcu(void)
207 {
208 	synchronize_sched();
209 }
210 
211 static inline int rcu_preempt_depth(void)
212 {
213 	return 0;
214 }
215 
216 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
217 
218 /* Internal to kernel */
219 extern void rcu_init(void);
220 extern void rcu_sched_qs(int cpu);
221 extern void rcu_bh_qs(int cpu);
222 extern void rcu_check_callbacks(int cpu, int user);
223 struct notifier_block;
224 extern void rcu_idle_enter(void);
225 extern void rcu_idle_exit(void);
226 extern void rcu_irq_enter(void);
227 extern void rcu_irq_exit(void);
228 
229 #ifdef CONFIG_RCU_USER_QS
230 extern void rcu_user_enter(void);
231 extern void rcu_user_exit(void);
232 extern void rcu_user_enter_after_irq(void);
233 extern void rcu_user_exit_after_irq(void);
234 #else
235 static inline void rcu_user_enter(void) { }
236 static inline void rcu_user_exit(void) { }
237 static inline void rcu_user_enter_after_irq(void) { }
238 static inline void rcu_user_exit_after_irq(void) { }
239 static inline void rcu_user_hooks_switch(struct task_struct *prev,
240 					 struct task_struct *next) { }
241 #endif /* CONFIG_RCU_USER_QS */
242 
243 /**
244  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
245  * @a: Code that RCU needs to pay attention to.
246  *
247  * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
248  * in the inner idle loop, that is, between the rcu_idle_enter() and
249  * the rcu_idle_exit() -- RCU will happily ignore any such read-side
250  * critical sections.  However, things like powertop need tracepoints
251  * in the inner idle loop.
252  *
253  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
254  * will tell RCU that it needs to pay attention, invoke its argument
255  * (in this example, a call to the do_something_with_RCU() function),
256  * and then tell RCU to go back to ignoring this CPU.  It is permissible
257  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
258  * quite limited.  If deeper nesting is required, it will be necessary
259  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
260  */
261 #define RCU_NONIDLE(a) \
262 	do { \
263 		rcu_irq_enter(); \
264 		do { a; } while (0); \
265 		rcu_irq_exit(); \
266 	} while (0)
267 
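/*
 * Usage sketch (illustrative only): emitting an event from the inner idle
 * loop.  The tracepoint trace_idle_event() is a hypothetical name; any
 * code needing RCU readers can be substituted.
 *
 *	RCU_NONIDLE(trace_idle_event(smp_processor_id()));
 *
 * RCU is told to watch this CPU only for the duration of the argument.
 */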
268 /*
269  * Infrastructure to implement the synchronize_() primitives in
270  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
271  */
272 
273 typedef void call_rcu_func_t(struct rcu_head *head,
274 			     void (*func)(struct rcu_head *head));
275 void wait_rcu_gp(call_rcu_func_t crf);
276 
277 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
278 #include <linux/rcutree.h>
279 #elif defined(CONFIG_TINY_RCU)
280 #include <linux/rcutiny.h>
281 #else
282 #error "Unknown RCU implementation specified to kernel configuration"
283 #endif
284 
285 /*
286  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
287  * initialization and destruction of rcu_head on the stack. rcu_head structures
288  * allocated dynamically in the heap or defined statically don't need any
289  * initialization.
290  */
291 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
292 extern void init_rcu_head_on_stack(struct rcu_head *head);
293 extern void destroy_rcu_head_on_stack(struct rcu_head *head);
294 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
295 static inline void init_rcu_head_on_stack(struct rcu_head *head)
296 {
297 }
298 
299 static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
300 {
301 }
302 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
303 
304 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
305 extern int rcu_is_cpu_idle(void);
306 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
307 
308 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
309 bool rcu_lockdep_current_cpu_online(void);
310 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
311 static inline bool rcu_lockdep_current_cpu_online(void)
312 {
313 	return true;
314 }
315 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
316 
317 #ifdef CONFIG_DEBUG_LOCK_ALLOC
318 
319 static inline void rcu_lock_acquire(struct lockdep_map *map)
320 {
321 	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
322 }
323 
324 static inline void rcu_lock_release(struct lockdep_map *map)
325 {
326 	lock_release(map, 1, _THIS_IP_);
327 }
328 
329 extern struct lockdep_map rcu_lock_map;
330 extern struct lockdep_map rcu_bh_lock_map;
331 extern struct lockdep_map rcu_sched_lock_map;
332 extern int debug_lockdep_rcu_enabled(void);
333 
334 /**
335  * rcu_read_lock_held() - might we be in RCU read-side critical section?
336  *
337  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
338  * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
339  * this assumes we are in an RCU read-side critical section unless it can
340  * prove otherwise.  This is useful for debug checks in functions that
341  * require that they be called within an RCU read-side critical section.
342  *
343  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
344  * and while lockdep is disabled.
345  *
346  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
347  * occur in the same context, for example, it is illegal to invoke
348  * rcu_read_unlock() in process context if the matching rcu_read_lock()
349  * was invoked from within an irq handler.
350  *
351  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
352  * offline from an RCU perspective, so check for those as well.
353  */
354 static inline int rcu_read_lock_held(void)
355 {
356 	if (!debug_lockdep_rcu_enabled())
357 		return 1;
358 	if (rcu_is_cpu_idle())
359 		return 0;
360 	if (!rcu_lockdep_current_cpu_online())
361 		return 0;
362 	return lock_is_held(&rcu_lock_map);
363 }
364 
365 /*
366  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
367  * hell.
368  */
369 extern int rcu_read_lock_bh_held(void);
370 
371 /**
372  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
373  *
374  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
375  * RCU-sched read-side critical section.  In the absence of
376  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
377  * critical section unless it can prove otherwise.  Note that disabling
378  * of preemption (including disabling irqs) counts as an RCU-sched
379  * read-side critical section.  This is useful for debug checks in functions
380  * that require that they be called within an RCU-sched read-side
381  * critical section.
382  *
383  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
384  * and while lockdep is disabled.
385  *
386  * Note that if the CPU is in the idle loop from an RCU point of
387  * view (ie: that we are in the section between rcu_idle_enter() and
388  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
389  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
390  * that are in such a section, considering these as in extended quiescent
391  * state, so such a CPU is effectively never in an RCU read-side critical
392  * section regardless of what RCU primitives it invokes.  This state of
393  * affairs is required --- we need to keep an RCU-free window in idle
394  * where the CPU may possibly enter into low power mode.  This way we can
395  * report an extended quiescent state to other CPUs that have started a grace
396  * period.  Otherwise we would delay any grace period for as long as we run in
397  * the idle task.
398  *
399  * Similarly, we avoid claiming an SRCU read lock held if the current
400  * CPU is offline.
401  */
402 #ifdef CONFIG_PREEMPT_COUNT
403 static inline int rcu_read_lock_sched_held(void)
404 {
405 	int lockdep_opinion = 0;
406 
407 	if (!debug_lockdep_rcu_enabled())
408 		return 1;
409 	if (rcu_is_cpu_idle())
410 		return 0;
411 	if (!rcu_lockdep_current_cpu_online())
412 		return 0;
413 	if (debug_locks)
414 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
415 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
416 }
417 #else /* #ifdef CONFIG_PREEMPT_COUNT */
418 static inline int rcu_read_lock_sched_held(void)
419 {
420 	return 1;
421 }
422 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
423 
424 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
425 
426 # define rcu_lock_acquire(a)		do { } while (0)
427 # define rcu_lock_release(a)		do { } while (0)
428 
429 static inline int rcu_read_lock_held(void)
430 {
431 	return 1;
432 }
433 
434 static inline int rcu_read_lock_bh_held(void)
435 {
436 	return 1;
437 }
438 
439 #ifdef CONFIG_PREEMPT_COUNT
440 static inline int rcu_read_lock_sched_held(void)
441 {
442 	return preempt_count() != 0 || irqs_disabled();
443 }
444 #else /* #ifdef CONFIG_PREEMPT_COUNT */
445 static inline int rcu_read_lock_sched_held(void)
446 {
447 	return 1;
448 }
449 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
450 
451 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
452 
453 #ifdef CONFIG_PROVE_RCU
454 
455 extern int rcu_my_thread_group_empty(void);
456 
457 /**
458  * rcu_lockdep_assert - emit lockdep splat if specified condition not met
459  * @c: condition to check
460  * @s: informative message
461  */
462 #define rcu_lockdep_assert(c, s)					\
463 	do {								\
464 		static bool __section(.data.unlikely) __warned;		\
465 		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
466 			__warned = true;				\
467 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
468 		}							\
469 	} while (0)
470 
471 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
472 static inline void rcu_preempt_sleep_check(void)
473 {
474 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
475 			   "Illegal context switch in RCU read-side critical section");
476 }
477 #else /* #ifdef CONFIG_PROVE_RCU */
478 static inline void rcu_preempt_sleep_check(void)
479 {
480 }
481 #endif /* #else #ifdef CONFIG_PROVE_RCU */
482 
483 #define rcu_sleep_check()						\
484 	do {								\
485 		rcu_preempt_sleep_check();				\
486 		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
487 				   "Illegal context switch in RCU-bh"	\
488 				   " read-side critical section");	\
489 		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
490 				   "Illegal context switch in RCU-sched"\
491 				   " read-side critical section");	\
492 	} while (0)
493 
494 #else /* #ifdef CONFIG_PROVE_RCU */
495 
496 #define rcu_lockdep_assert(c, s) do { } while (0)
497 #define rcu_sleep_check() do { } while (0)
498 
499 #endif /* #else #ifdef CONFIG_PROVE_RCU */
500 
501 /*
502  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
503  * and rcu_assign_pointer().  Some of these could be folded into their
504  * callers, but they are left separate in order to ease introduction of
505  * multiple flavors of pointers to match the multiple flavors of RCU
506  * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
507  * the future.
508  */
509 
510 #ifdef __CHECKER__
511 #define rcu_dereference_sparse(p, space) \
512 	((void)(((typeof(*p) space *)p) == p))
513 #else /* #ifdef __CHECKER__ */
514 #define rcu_dereference_sparse(p, space)
515 #endif /* #else #ifdef __CHECKER__ */
516 
517 #define __rcu_access_pointer(p, space) \
518 	({ \
519 		typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
520 		rcu_dereference_sparse(p, space); \
521 		((typeof(*p) __force __kernel *)(_________p1)); \
522 	})
523 #define __rcu_dereference_check(p, c, space) \
524 	({ \
525 		typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
526 		rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
527 				      " usage"); \
528 		rcu_dereference_sparse(p, space); \
529 		smp_read_barrier_depends(); \
530 		((typeof(*p) __force __kernel *)(_________p1)); \
531 	})
532 #define __rcu_dereference_protected(p, c, space) \
533 	({ \
534 		rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
535 				      " usage"); \
536 		rcu_dereference_sparse(p, space); \
537 		((typeof(*p) __force __kernel *)(p)); \
538 	})
539 
540 #define __rcu_access_index(p, space) \
541 	({ \
542 		typeof(p) _________p1 = ACCESS_ONCE(p); \
543 		rcu_dereference_sparse(p, space); \
544 		(_________p1); \
545 	})
546 #define __rcu_dereference_index_check(p, c) \
547 	({ \
548 		typeof(p) _________p1 = ACCESS_ONCE(p); \
549 		rcu_lockdep_assert(c, \
550 				   "suspicious rcu_dereference_index_check()" \
551 				   " usage"); \
552 		smp_read_barrier_depends(); \
553 		(_________p1); \
554 	})
555 #define __rcu_assign_pointer(p, v, space) \
556 	do { \
557 		smp_wmb(); \
558 		(p) = (typeof(*v) __force space *)(v); \
559 	} while (0)
560 
561 
562 /**
563  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
564  * @p: The pointer to read
565  *
566  * Return the value of the specified RCU-protected pointer, but omit the
567  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
568  * when the value of this pointer is accessed, but the pointer is not
569  * dereferenced, for example, when testing an RCU-protected pointer against
570  * NULL.  Although rcu_access_pointer() may also be used in cases where
571  * update-side locks prevent the value of the pointer from changing, you
572  * should instead use rcu_dereference_protected() for this use case.
573  *
574  * It is also permissible to use rcu_access_pointer() when read-side
575  * access to the pointer was removed at least one grace period ago, as
576  * is the case in the context of the RCU callback that is freeing up
577  * the data, or after a synchronize_rcu() returns.  This can be useful
578  * when tearing down multi-linked structures after a grace period
579  * has elapsed.
580  */
581 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
582 
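/*
 * Usage sketch (illustrative only): checking for publication without
 * dereferencing.  The global __rcu pointer "gp" is a hypothetical name.
 *
 *	if (!rcu_access_pointer(gp))
 *		return;
 *
 * Because the pointer is never dereferenced, no rcu_read_lock() is needed
 * around this test.
 */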
583 /**
584  * rcu_dereference_check() - rcu_dereference with debug checking
585  * @p: The pointer to read, prior to dereferencing
586  * @c: The conditions under which the dereference will take place
587  *
588  * Do an rcu_dereference(), but check that the conditions under which the
589  * dereference will take place are correct.  Typically the conditions
590  * indicate the various locking conditions that should be held at that
591  * point.  The check should return true if the conditions are satisfied.
592  * An implicit check for being in an RCU read-side critical section
593  * (rcu_read_lock()) is included.
594  *
595  * For example:
596  *
597  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
598  *
599  * could be used to indicate to lockdep that foo->bar may only be dereferenced
600  * if either rcu_read_lock() is held or the lock required to replace
601  * the bar struct at foo->bar is held.
602  *
603  * Note that the list of conditions may also include indications of when a lock
604  * need not be held, for example during initialisation or destruction of the
605  * target struct:
606  *
607  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
608  *					      atomic_read(&foo->usage) == 0);
609  *
610  * Inserts memory barriers on architectures that require them
611  * (currently only the Alpha), prevents the compiler from refetching
612  * (and from merging fetches), and, more importantly, documents exactly
613  * which pointers are protected by RCU and checks that the pointer is
614  * annotated as __rcu.
615  */
616 #define rcu_dereference_check(p, c) \
617 	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
618 
619 /**
620  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
621  * @p: The pointer to read, prior to dereferencing
622  * @c: The conditions under which the dereference will take place
623  *
624  * This is the RCU-bh counterpart to rcu_dereference_check().
625  */
626 #define rcu_dereference_bh_check(p, c) \
627 	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
628 
629 /**
630  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
631  * @p: The pointer to read, prior to dereferencing
632  * @c: The conditions under which the dereference will take place
633  *
634  * This is the RCU-sched counterpart to rcu_dereference_check().
635  */
636 #define rcu_dereference_sched_check(p, c) \
637 	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
638 				__rcu)
639 
640 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
641 
642 /*
643  * The tracing infrastructure traces RCU (we want that), but unfortunately
644  * some of the RCU checks causes tracing to lock up the system.
645  *
646  * The tracing version of rcu_dereference_raw() must not call
647  * rcu_read_lock_held().
648  */
649 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
650 
651 /**
652  * rcu_access_index() - fetch RCU index with no dereferencing
653  * @p: The index to read
654  *
655  * Return the value of the specified RCU-protected index, but omit the
656  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
657  * when the value of this index is accessed, but the index is not
658  * dereferenced, for example, when testing an RCU-protected index against
659  * -1.  Although rcu_access_index() may also be used in cases where
660  * update-side locks prevent the value of the index from changing, you
661  * should instead use rcu_dereference_index_protected() for this use case.
662  */
663 #define rcu_access_index(p) __rcu_access_index((p), __rcu)
664 
665 /**
666  * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
667  * @p: The pointer to read, prior to dereferencing
668  * @c: The conditions under which the dereference will take place
669  *
670  * Similar to rcu_dereference_check(), but omits the sparse checking.
671  * This allows rcu_dereference_index_check() to be used on integers,
672  * which can then be used as array indices.  Attempting to use
673  * rcu_dereference_check() on an integer will give compiler warnings
674  * because the sparse address-space mechanism relies on dereferencing
675  * the RCU-protected pointer.  Dereferencing integers is not something
676  * that even gcc will put up with.
677  *
678  * Note that this function does not implicitly check for RCU read-side
679  * critical sections.  If this function gains lots of uses, it might
680  * make sense to provide versions for each flavor of RCU, but it does
681  * not make sense as of early 2010.
682  */
683 #define rcu_dereference_index_check(p, c) \
684 	__rcu_dereference_index_check((p), (c))
685 
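/*
 * Usage sketch (illustrative only): using an RCU-protected integer as an
 * array index.  The array "foo_table" and the index variable "foo_cur"
 * are hypothetical names.
 *
 *	idx = rcu_dereference_index_check(foo_cur, rcu_read_lock_held());
 *	val = foo_table[idx];
 */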
686 /**
687  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
688  * @p: The pointer to read, prior to dereferencing
689  * @c: The conditions under which the dereference will take place
690  *
691  * Return the value of the specified RCU-protected pointer, but omit
692  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
693  * is useful in cases where update-side locks prevent the value of the
694  * pointer from changing.  Please note that this primitive does -not-
695  * prevent the compiler from repeating this reference or combining it
696  * with other references, so it should not be used without protection
697  * of appropriate locks.
698  *
699  * This function is only for update-side use.  Using this function
700  * when protected only by rcu_read_lock() will result in infrequent
701  * but very ugly failures.
702  */
703 #define rcu_dereference_protected(p, c) \
704 	__rcu_dereference_protected((p), (c), __rcu)
705 
706 
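/*
 * Usage sketch (illustrative only): update-side access under the lock that
 * excludes other updaters.  The names "gp" and "gp_lock" are hypothetical.
 *
 *	spin_lock(&gp_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	if (p)
 *		p->data = 1;
 *	spin_unlock(&gp_lock);
 *
 * Holding gp_lock prevents gp from changing, so neither ACCESS_ONCE()
 * nor smp_read_barrier_depends() is needed here.
 */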
707 /**
708  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
709  * @p: The pointer to read, prior to dereferencing
710  *
711  * This is a simple wrapper around rcu_dereference_check().
712  */
713 #define rcu_dereference(p) rcu_dereference_check(p, 0)
714 
715 /**
716  * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
717  * @p: The pointer to read, prior to dereferencing
718  *
719  * Makes rcu_dereference_check() do the dirty work.
720  */
721 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
722 
723 /**
724  * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
725  * @p: The pointer to read, prior to dereferencing
726  *
727  * Makes rcu_dereference_check() do the dirty work.
728  */
729 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
730 
731 /**
732  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
733  *
734  * When synchronize_rcu() is invoked on one CPU while other CPUs
735  * are within RCU read-side critical sections, then the
736  * synchronize_rcu() is guaranteed to block until after all the other
737  * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
738  * on one CPU while other CPUs are within RCU read-side critical
739  * sections, invocation of the corresponding RCU callback is deferred
740  * until after all the other CPUs exit their critical sections.
741  *
742  * Note, however, that RCU callbacks are permitted to run concurrently
743  * with new RCU read-side critical sections.  One way that this can happen
744  * is via the following sequence of events: (1) CPU 0 enters an RCU
745  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
746  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
747  * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
748  * callback is invoked.  This is legal, because the RCU read-side critical
749  * section that was running concurrently with the call_rcu() (and which
750  * therefore might be referencing something that the corresponding RCU
751  * callback would free up) has completed before the corresponding
752  * RCU callback is invoked.
753  *
754  * RCU read-side critical sections may be nested.  Any deferred actions
755  * will be deferred until the outermost RCU read-side critical section
756  * completes.
757  *
758  * You can avoid reading and understanding the next paragraph by
759  * following this rule: don't put anything in an rcu_read_lock() RCU
760  * read-side critical section that would block in a !PREEMPT kernel.
761  * But if you want the full story, read on!
762  *
763  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
764  * is illegal to block while in an RCU read-side critical section.  In
765  * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
766  * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
767  * be preempted, but explicit blocking is illegal.  Finally, in preemptible
768  * RCU implementations in real-time (with -rt patchset) kernel builds,
769  * RCU read-side critical sections may be preempted and they may also
770  * block, but only when acquiring spinlocks that are subject to priority
771  * inheritance.
772  */
773 static inline void rcu_read_lock(void)
774 {
775 	__rcu_read_lock();
776 	__acquire(RCU);
777 	rcu_lock_acquire(&rcu_lock_map);
778 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
779 			   "rcu_read_lock() used illegally while idle");
780 }
781 
782 /*
783  * So where is rcu_write_lock()?  It does not exist, as there is no
784  * way for writers to lock out RCU readers.  This is a feature, not
785  * a bug -- this property is what provides RCU's performance benefits.
786  * Of course, writers must coordinate with each other.  The normal
787  * spinlock primitives work well for this, but any other technique may be
788  * used as well.  RCU does not care how the writers keep out of each
789  * others' way, as long as they do so.
790  */
791 
792 /**
793  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
794  *
795  * See rcu_read_lock() for more information.
796  */
797 static inline void rcu_read_unlock(void)
798 {
799 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
800 			   "rcu_read_unlock() used illegally while idle");
801 	rcu_lock_release(&rcu_lock_map);
802 	__release(RCU);
803 	__rcu_read_unlock();
804 }
805 
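/*
 * Usage sketch (illustrative only): a typical read-side critical section.
 * The names "gp", "data", and do_something_with() are hypothetical.
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock();
 *
 * The pointer p must not be used after rcu_read_unlock(), because the
 * structure it references may be freed as soon as the critical section ends.
 */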
806 /**
807  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
808  *
809  * This is the equivalent of rcu_read_lock(), but to be used when updates
810  * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
811  * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
812  * softirq handler to be a quiescent state, a process in RCU read-side
813  * critical section must be protected by disabling softirqs. Read-side
814  * critical sections in interrupt context can use just rcu_read_lock(),
815  * though this should at least be commented to avoid confusing people
816  * reading the code.
817  *
818  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
819  * must occur in the same context, for example, it is illegal to invoke
820  * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
821  * was invoked from some other task.
822  */
823 static inline void rcu_read_lock_bh(void)
824 {
825 	local_bh_disable();
826 	__acquire(RCU_BH);
827 	rcu_lock_acquire(&rcu_bh_lock_map);
828 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
829 			   "rcu_read_lock_bh() used illegally while idle");
830 }
831 
832 /*
833  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
834  *
835  * See rcu_read_lock_bh() for more information.
836  */
837 static inline void rcu_read_unlock_bh(void)
838 {
839 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
840 			   "rcu_read_unlock_bh() used illegally while idle");
841 	rcu_lock_release(&rcu_bh_lock_map);
842 	__release(RCU_BH);
843 	local_bh_enable();
844 }
845 
846 /**
847  * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
848  *
849  * This is the equivalent of rcu_read_lock(), but to be used when updates
850  * are being done using call_rcu_sched() or synchronize_rcu_sched().
851  * Read-side critical sections can also be introduced by anything that
852  * disables preemption, including local_irq_disable() and friends.
853  *
854  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
855  * must occur in the same context, for example, it is illegal to invoke
856  * rcu_read_unlock_sched() from process context if the matching
857  * rcu_read_lock_sched() was invoked from an NMI handler.
858  */
859 static inline void rcu_read_lock_sched(void)
860 {
861 	preempt_disable();
862 	__acquire(RCU_SCHED);
863 	rcu_lock_acquire(&rcu_sched_lock_map);
864 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
865 			   "rcu_read_lock_sched() used illegally while idle");
866 }
867 
868 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
869 static inline notrace void rcu_read_lock_sched_notrace(void)
870 {
871 	preempt_disable_notrace();
872 	__acquire(RCU_SCHED);
873 }
874 
875 /*
876  * rcu_read_unlock_sched - marks the end of an RCU-sched critical section
877  *
878  * See rcu_read_lock_sched() for more information.
879  */
880 static inline void rcu_read_unlock_sched(void)
881 {
882 	rcu_lockdep_assert(!rcu_is_cpu_idle(),
883 			   "rcu_read_unlock_sched() used illegally while idle");
884 	rcu_lock_release(&rcu_sched_lock_map);
885 	__release(RCU_SCHED);
886 	preempt_enable();
887 }
888 
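/*
 * Usage sketch (illustrative only): an RCU-sched reader paired with an
 * updater that waits via synchronize_sched().  The names "gp", "new",
 * "old", and do_something_with() are hypothetical.
 *
 * Reader:
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock_sched();
 *
 * Updater (holding the update-side lock, not shown):
 *	old = rcu_dereference_protected(gp, 1);
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();
 *	kfree(old);
 */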
889 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
890 static inline notrace void rcu_read_unlock_sched_notrace(void)
891 {
892 	__release(RCU_SCHED);
893 	preempt_enable_notrace();
894 }
895 
896 /**
897  * rcu_assign_pointer() - assign to RCU-protected pointer
898  * @p: pointer to assign to
899  * @v: value to assign (publish)
900  *
901  * Assigns the specified value to the specified RCU-protected
902  * pointer, ensuring that any concurrent RCU readers will see
903  * any prior initialization.
904  *
905  * Inserts memory barriers on architectures that require them
906  * (which is most of them), and also prevents the compiler from
907  * reordering the code that initializes the structure after the pointer
908  * assignment.  More importantly, this call documents which pointers
909  * will be dereferenced by RCU read-side code.
910  *
911  * In some special cases, you may use RCU_INIT_POINTER() instead
912  * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
913  * to the fact that it does not constrain either the CPU or the compiler.
914  * That said, using RCU_INIT_POINTER() when you should have used
915  * rcu_assign_pointer() is a very bad thing that results in
916  * impossible-to-diagnose memory corruption.  So please be careful.
917  * See the RCU_INIT_POINTER() comment header for details.
918  */
919 #define rcu_assign_pointer(p, v) \
920 	__rcu_assign_pointer((p), (v), __rcu)
921 
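/*
 * Usage sketch (illustrative only): publishing a newly initialized
 * structure.  The names "gp" and "data" are hypothetical.
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	p->data = 42;
 *	rcu_assign_pointer(gp, p);
 *
 * A concurrent reader doing rcu_dereference(gp) that sees the new value
 * of gp is guaranteed to also see p->data == 42.
 */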
922 /**
923  * RCU_INIT_POINTER() - initialize an RCU protected pointer
924  *
925  * Initialize an RCU-protected pointer in special cases where readers
926  * do not need ordering constraints on the CPU or the compiler.  These
927  * special cases are:
928  *
929  * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
930  * 2.	The caller has taken whatever steps are required to prevent
931  *	RCU readers from concurrently accessing this pointer -or-
932  * 3.	The referenced data structure has already been exposed to
933  *	readers either at compile time or via rcu_assign_pointer() -and-
934  *	a.	You have not made -any- reader-visible changes to
935  *		this structure since then -or-
936  *	b.	It is OK for readers accessing this structure from its
937  *		new location to see the old state of the structure.  (For
938  *		example, the changes were to statistical counters or to
939  *		other state where exact synchronization is not required.)
940  *
941  * Failure to follow these rules governing use of RCU_INIT_POINTER() will
942  * result in impossible-to-diagnose memory corruption.  That is, the
943  * structures will look OK in crash dumps, but any concurrent RCU readers
944  * might see pre-initialization values of the referenced data structure.  So
945  * please be very careful how you use RCU_INIT_POINTER()!!!
946  *
947  * If you are creating an RCU-protected linked structure that is accessed
948  * by a single external-to-structure RCU-protected pointer, then you may
949  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
950  * pointers, but you must use rcu_assign_pointer() to initialize the
951  * external-to-structure pointer -after- you have completely initialized
952  * the reader-accessible portions of the linked structure.
953  */
954 #define RCU_INIT_POINTER(p, v) \
955 	do { \
956 		p = (typeof(*v) __force __rcu *)(v); \
957 	} while (0)
958 
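/*
 * Usage sketch (illustrative only): the common safe case of NULLing out an
 * RCU-protected pointer, which needs no ordering against readers.  The
 * name "gp" is hypothetical.
 *
 *	RCU_INIT_POINTER(gp, NULL);
 *
 * Publishing a non-NULL structure this way is safe only under the rules
 * listed above; otherwise use rcu_assign_pointer().
 */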
959 /**
960  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
961  *
962  * GCC-style initialization for an RCU-protected pointer in a structure field.
963  */
964 #define RCU_POINTER_INITIALIZER(p, v) \
965 		.p = (typeof(*v) __force __rcu *)(v)
966 
967 /*
968  * Does the specified offset indicate that the corresponding rcu_head
969  * structure can be handled by kfree_rcu()?
970  */
971 #define __is_kfree_rcu_offset(offset) ((offset) < 4096)
972 
973 /*
974  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
975  */
976 #define __kfree_rcu(head, offset) \
977 	do { \
978 		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
979 		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
980 	} while (0)
981 
982 /**
983  * kfree_rcu() - kfree an object after a grace period.
984  * @ptr:	pointer to kfree
985  * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
986  *
987  * Many RCU callback functions just call kfree() on the base structure.
988  * These functions are trivial, but their size adds up, and furthermore
989  * when they are used in a kernel module, that module must invoke the
990  * high-latency rcu_barrier() function at module-unload time.
991  *
992  * The kfree_rcu() function handles this issue.  Rather than encoding a
993  * function address in the embedded rcu_head structure, kfree_rcu() instead
994  * encodes the offset of the rcu_head structure within the base structure.
995  * Because the functions are not allowed in the low-order 4096 bytes of
996  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
997  * If the offset is larger than 4095 bytes, a compile-time error will
998  * be generated in __kfree_rcu().  If this error is triggered, you can
999  * either fall back to use of call_rcu() or rearrange the structure to
1000  * position the rcu_head structure into the first 4096 bytes.
1001  *
1002  * Note that the allowable offset might decrease in the future, for example,
1003  * to allow something like kmem_cache_free_rcu().
1004  *
1005  * The BUILD_BUG_ON check must not involve any function calls, hence the
1006  * checks are done in macros here.
1007  */
1008 #define kfree_rcu(ptr, rcu_head)					\
1009 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
1010 
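/*
 * Usage sketch (illustrative only): the type "struct foo" with an embedded
 * rcu_head named "rcu", the pointer "fp", and foo_reclaim() are
 * hypothetical names.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 * Instead of:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * where foo_reclaim() merely calls kfree(), one can write:
 *
 *	kfree_rcu(fp, rcu);
 *
 * which also avoids the module-unload rcu_barrier() issue described above.
 */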
1011 #ifdef CONFIG_RCU_NOCB_CPU
1012 extern bool rcu_is_nocb_cpu(int cpu);
1013 #else
1014 static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
1015 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
1016 
1017 
1018 #endif /* __LINUX_RCUPDATE_H */
1019