xref: /linux-6.15/include/linux/rcupdate_trace.h (revision 7c565a4d)
1d5f177d3SPaul E. McKenney /* SPDX-License-Identifier: GPL-2.0+ */
2d5f177d3SPaul E. McKenney /*
3d5f177d3SPaul E. McKenney  * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
4d5f177d3SPaul E. McKenney  *
5d5f177d3SPaul E. McKenney  * Copyright (C) 2020 Paul E. McKenney.
6d5f177d3SPaul E. McKenney  */
7d5f177d3SPaul E. McKenney 
8d5f177d3SPaul E. McKenney #ifndef __LINUX_RCUPDATE_TRACE_H
9d5f177d3SPaul E. McKenney #define __LINUX_RCUPDATE_TRACE_H
10d5f177d3SPaul E. McKenney 
11d5f177d3SPaul E. McKenney #include <linux/sched.h>
12d5f177d3SPaul E. McKenney #include <linux/rcupdate.h>
13*7c565a4dSMathieu Desnoyers #include <linux/cleanup.h>
14d5f177d3SPaul E. McKenney 
15d5f177d3SPaul E. McKenney extern struct lockdep_map rcu_trace_lock_map;
16d5f177d3SPaul E. McKenney 
17891cd1f9SJakub Kicinski #ifdef CONFIG_DEBUG_LOCK_ALLOC
18891cd1f9SJakub Kicinski 
/* Lockdep-backed query: non-zero iff an RCU Tasks Trace reader is held. */
static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}
23d5f177d3SPaul E. McKenney 
24d5f177d3SPaul E. McKenney #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
25d5f177d3SPaul E. McKenney 
/*
 * Without CONFIG_DEBUG_LOCK_ALLOC there is no lockdep state to consult,
 * so conservatively report the reader lock as always held.
 */
static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}
30d5f177d3SPaul E. McKenney 
31d5f177d3SPaul E. McKenney #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
32d5f177d3SPaul E. McKenney 
33d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
34d5f177d3SPaul E. McKenney 
35a5c071ccSPaul E. McKenney void rcu_read_unlock_trace_special(struct task_struct *t);
36d5f177d3SPaul E. McKenney 
/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on
 * one task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	// Bump this task's reader-nesting count.  READ_ONCE()/WRITE_ONCE()
	// because the unlock path notes that this field can also be poked
	// from IPI context (see rcu_read_unlock_trace()).
	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier(); // Keep the nesting update ahead of the critical section.
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map); // Tell lockdep the reader is held.
}
60d5f177d3SPaul E. McKenney 
/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking a rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map); // Lockdep: reader released.
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	// Temporarily parking the counter at a large negative value signals
	// concurrent observers that an unlock is in progress.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	// Fast path: no special state pending, or still nested — just store
	// the decremented count and return.
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	// Slow path: outermost unlock with special state set; hand off to
	// the out-of-line helper (declared above) to report the quiescent
	// state.  It is responsible for restoring .trc_reader_nesting.
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}
87d5f177d3SPaul E. McKenney 
88d5f177d3SPaul E. McKenney void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
89d5f177d3SPaul E. McKenney void synchronize_rcu_tasks_trace(void);
90d5f177d3SPaul E. McKenney void rcu_barrier_tasks_trace(void);
915f8e3202SPaul E. McKenney struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
929667305cSAlexei Starovoitov #else
939667305cSAlexei Starovoitov /*
949667305cSAlexei Starovoitov  * The BPF JIT forms these addresses even when it doesn't call these
959667305cSAlexei Starovoitov  * functions, so provide definitions that result in runtime errors.
969667305cSAlexei Starovoitov  */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); } /* Must never run with CONFIG_TASKS_TRACE_RCU=n. */
static inline void rcu_read_lock_trace(void) { BUG(); } /* Ditto. */
static inline void rcu_read_unlock_trace(void) { BUG(); } /* Ditto. */
100d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
101d5f177d3SPaul E. McKenney 
/*
 * Scoped-guard support (see <linux/cleanup.h>): guard(rcu_tasks_trace)()
 * enters an RCU Tasks Trace read-side critical section via
 * rcu_read_lock_trace() and automatically exits it via
 * rcu_read_unlock_trace() when the guard goes out of scope.
 */
DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
	rcu_read_lock_trace(),
	rcu_read_unlock_trace())
105*7c565a4dSMathieu Desnoyers 
106d5f177d3SPaul E. McKenney #endif /* __LINUX_RCUPDATE_TRACE_H */
107