/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <[email protected]>
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
#else
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
				      struct rcu_head *rhp);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
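
/*
 * Usage sketch (illustrative, not part of the original header): these
 * macros compare free-running counters in a wraparound-safe way.  For
 * example, ULONG_CMP_GE(0, ULONG_MAX) is true because 0 - ULONG_MAX
 * wraps around to 1, which is at most ULONG_MAX / 2, so 0 is treated
 * as coming after ULONG_MAX.  A hypothetical check of a grace-period
 * counter against a previously taken snapshot might read:
 *
 *	if (ULONG_CMP_GE(completed, snap))
 *		gp_has_elapsed = true;
 */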

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
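
/*
 * Usage sketch for call_rcu() (illustrative only; struct foo and
 * foo_reclaim() are hypothetical names, not defined in this header):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * After removing a struct foo from all reader-visible data structures,
 * the updater hands it to RCU rather than freeing it immediately:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */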

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  - anything that disables preemption.
 *  These may be nested.
 */
extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *rcu));

extern void synchronize_sched(void);

#ifdef CONFIG_PREEMPT_RCU

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
extern void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
extern void rcu_idle_enter(void);
extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void exit_rcu(void);

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 *
 * This macro may be used from process-level code only.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_idle_exit(); \
		do { a; } while (0); \
		rcu_idle_enter(); \
	} while (0)
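
/*
 * Usage sketch (illustrative): firing a tracepoint from the inner
 * idle loop, where trace_cpu_idle_detail() stands in for whatever
 * tracepoint is actually needed:
 *
 *	RCU_NONIDLE(trace_cpu_idle_detail(state));
 *
 * This momentarily tells RCU to watch this CPU, invokes the tracepoint
 * (which may contain RCU readers), and then returns the CPU to
 * RCU-idle state.
 */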

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

typedef void call_rcu_func_t(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
extern void init_rcu_head_on_stack(struct rcu_head *head);
extern void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
extern int rcu_is_cpu_idle(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
static inline int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}

/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required: we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode. This way, CPUs
 * that have started a grace period can notice our extended quiescent
 * state. Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

extern int rcu_my_thread_group_empty(void);

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)					\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)
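
/*
 * Usage sketch (illustrative): a function that must run with
 * RCU-sched protection can document and check that requirement,
 * where foo() is a hypothetical caller-supplied function:
 *
 *	rcu_lockdep_assert(rcu_read_lock_sched_held(),
 *			   "foo() needs rcu_read_lock_sched() protection");
 */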

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal context switch in RCU read-side critical section");
}
#else /* #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
				   "Illegal context switch in RCU-bh"	\
				   " read-side critical section");	\
		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
				   "Illegal context switch in RCU-sched"\
				   " read-side critical section");	\
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
	({ \
		typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
		rcu_dereference_sparse(p, space); \
		((typeof(*p) __force __kernel *)(_________p1)); \
	})
#define __rcu_dereference_check(p, c, space) \
	({ \
		typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
		rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
				      " usage"); \
		rcu_dereference_sparse(p, space); \
		smp_read_barrier_depends(); \
		((typeof(*p) __force __kernel *)(_________p1)); \
	})
#define __rcu_dereference_protected(p, c, space) \
	({ \
		rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
				      " usage"); \
		rcu_dereference_sparse(p, space); \
		((typeof(*p) __force __kernel *)(p)); \
	})

#define __rcu_access_index(p, space) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		rcu_dereference_sparse(p, space); \
		(_________p1); \
	})
#define __rcu_dereference_index_check(p, c) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		rcu_lockdep_assert(c, \
				   "suspicious rcu_dereference_index_check()" \
				   " usage"); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})
#define __rcu_assign_pointer(p, v, space) \
	do { \
		smp_wmb(); \
		(p) = (typeof(*v) __force space *)(v); \
	} while (0)


/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
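
/*
 * Usage sketch (illustrative): testing a hypothetical RCU-protected
 * global pointer gp against NULL without dereferencing it:
 *
 *	if (rcu_access_pointer(gp))
 *		wakeup_consumers();
 *
 * Note that gp may be assigned a new value immediately after the
 * test, so the result is a hint, not a guarantee.
 */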

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/**
 * rcu_access_index() - fetch RCU index with no dereferencing
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1.  Although rcu_access_index() may also be used in cases where
 * update-side locks prevent the value of the index from changing, you
 * should instead use rcu_dereference_index_protected() for this use case.
 */
#define rcu_access_index(p) __rcu_access_index((p), __rcu)

/**
 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Similar to rcu_dereference_check(), but omits the sparse checking.
 * This allows rcu_dereference_index_check() to be used on integers,
 * which can then be used as array indices.  Attempting to use
 * rcu_dereference_check() on an integer will give compiler warnings
 * because the sparse address-space mechanism relies on dereferencing
 * the RCU-protected pointer.  Dereferencing integers is not something
 * that even gcc will put up with.
 *
 * Note that this function does not implicitly check for RCU read-side
 * critical sections.  If this function gains lots of uses, it might
 * make sense to provide versions for each flavor of RCU, but it does
 * not make sense as of early 2010.
 */
#define rcu_dereference_index_check(p, c) \
	__rcu_dereference_index_check((p), (c))

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
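
/*
 * Usage sketch (illustrative): an updater already holding foo->lock
 * (hypothetical structure and lock names) may fetch the pointer
 * without the ACCESS_ONCE() and memory-barrier overhead:
 *
 *	p = rcu_dereference_protected(foo->bar,
 *				      lockdep_is_held(&foo->lock));
 */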


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
 * is illegal to block while in an RCU read-side critical section.  In
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_lock() used illegally while idle");
}
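
/*
 * Usage sketch (illustrative): a typical RCU reader, where gp is a
 * hypothetical RCU-protected global pointer:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock();
 *
 * The pointer returned by rcu_dereference() must not be used after
 * the matching rcu_read_unlock().
 */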

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_unlock() used illegally while idle");
	rcu_lock_release(&rcu_lock_map);
	__release(RCU);
	__rcu_read_unlock();
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in an RCU read-side
 * critical section must be protected by disabling softirqs. Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_lockdep_assert(!rcu_is_cpu_idle(),
			   "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 */
#define rcu_assign_pointer(p, v) \
	__rcu_assign_pointer((p), (v), __rcu)
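
/*
 * Usage sketch (illustrative): publishing a fully initialized
 * structure via a hypothetical RCU-protected global pointer gp:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p) {
 *		p->data = 42;
 *		rcu_assign_pointer(gp, p);
 *	}
 *
 * Any reader that sees the new value of gp via rcu_dereference() is
 * guaranteed to also see the assignment to p->data.
 */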

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		p = (typeof(*v) __force __rcu *)(v); \
	} while (0)
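
/*
 * Usage sketch (illustrative): NULLing out an RCU-protected pointer
 * needs no ordering, so RCU_INIT_POINTER() suffices (foo, bar, and
 * the lock are hypothetical):
 *
 *	old = rcu_dereference_protected(foo->bar,
 *					lockdep_is_held(&foo->lock));
 *	RCU_INIT_POINTER(foo->bar, NULL);
 *	synchronize_rcu();
 *	kfree(old);
 */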

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = (typeof(*v) __force __rcu *)(v)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head)					\
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
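
/*
 * Usage sketch (illustrative): given the hypothetical struct foo from
 * the call_rcu() example above, whose rcu_head member is named "rcu",
 * the separate foo_reclaim() callback can be dropped entirely:
 *
 *	kfree_rcu(fp, rcu);
 *
 * This is equivalent to call_rcu(&fp->rcu, foo_reclaim) when the
 * callback does nothing but kfree() the enclosing structure.
 */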

#endif /* __LINUX_RCUPDATE_H */