18c366db0SPaul E. McKenney /* SPDX-License-Identifier: GPL-2.0+ */
2d8be8173SPaul E. McKenney /*
3d8be8173SPaul E. McKenney * Sleepable Read-Copy Update mechanism for mutual exclusion,
4d8be8173SPaul E. McKenney * tree variant.
5d8be8173SPaul E. McKenney *
6d8be8173SPaul E. McKenney * Copyright (C) IBM Corporation, 2017
7d8be8173SPaul E. McKenney *
88c366db0SPaul E. McKenney * Author: Paul McKenney <[email protected]>
9d8be8173SPaul E. McKenney */
10d8be8173SPaul E. McKenney
11d8be8173SPaul E. McKenney #ifndef _LINUX_SRCU_TREE_H
12d8be8173SPaul E. McKenney #define _LINUX_SRCU_TREE_H
13d8be8173SPaul E. McKenney
14da915ad5SPaul E. McKenney #include <linux/rcu_node_tree.h>
15da915ad5SPaul E. McKenney #include <linux/completion.h>
16da915ad5SPaul E. McKenney
17da915ad5SPaul E. McKenney struct srcu_node;
18da915ad5SPaul E. McKenney struct srcu_struct;
19da915ad5SPaul E. McKenney
/*
 * One element of the srcu_data srcu_ctrs array.  Readers count an entry
 * into an SRCU read-side critical section in ->srcu_locks and an exit
 * in ->srcu_unlocks; the grace-period machinery compares the two sums.
 */
struct srcu_ctr {
	atomic_long_t srcu_locks;	/* Locks per CPU. */
	atomic_long_t srcu_unlocks;	/* Unlocks per CPU. */
};
2556eb8be1SPaul E. McKenney
/*
 * Per-CPU structure feeding into leaf srcu_node, similar in function
 * to rcu_node.
 */
struct srcu_data {
	/* Read-side state. */
	struct srcu_ctr srcu_ctrs[2];		/* Locks and unlocks per CPU. */
	int srcu_reader_flavor;			/* Reader flavor for srcu_struct structure? */
						/* Values: SRCU_READ_FLAVOR_.* */

	/* Update-side state. */
	spinlock_t __private lock ____cacheline_internodealigned_in_smp;
	struct rcu_segcblist srcu_cblist;	/* List of callbacks. */
	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
	bool srcu_cblist_invoking;		/* Invoking these CBs? */
	struct timer_list delay_work;		/* Delay for CB invoking. */
	struct work_struct work;		/* Context for CB invoking. */
	struct rcu_head srcu_barrier_head;	/* For srcu_barrier() use. */
	struct srcu_node *mynode;		/* Leaf srcu_node. */
	unsigned long grpmask;			/* Mask for leaf srcu_node */
						/*  ->srcu_data_have_cbs[]. */
	int cpu;				/* CPU that owns this srcu_data. */
	struct srcu_struct *ssp;		/* Back-pointer to enclosing srcu_struct. */
};
51d8be8173SPaul E. McKenney
/*
 * Node in SRCU combining tree, similar in function to rcu_data.
 * Fans in callback and grace-period state from the srcu_data
 * structures of the CPUs in [->grplo, ->grphi].
 */
struct srcu_node {
	spinlock_t __private lock;
	unsigned long srcu_have_cbs[4];		/* GP seq for children having CBs, but only */
						/*  if greater than ->srcu_gp_seq. */
	unsigned long srcu_data_have_cbs[4];	/* Which srcu_data structs have CBs for given GP? */
	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
	struct srcu_node *srcu_parent;		/* Next up in tree. */
	int grplo;				/* Least CPU for node. */
	int grphi;				/* Biggest CPU for node. */
};
65da915ad5SPaul E. McKenney
/*
 * Per-SRCU-domain structure, update-side data linked from srcu_struct.
 * Holds the combining tree, grace-period sequence numbers, and the
 * srcu_barrier() machinery for one SRCU domain.
 */
struct srcu_usage {
	struct srcu_node *node;			/* Combining tree. */
	struct srcu_node *level[RCU_NUM_LVLS + 1];
						/* First node at each level. */
	int srcu_size_state;			/* Small-to-big transition state (SRCU_SIZE_*). */
	struct mutex srcu_cb_mutex;		/* Serialize CB preparation. */
	spinlock_t __private lock;		/* Protect counters and size state. */
	struct mutex srcu_gp_mutex;		/* Serialize GP work. */
	unsigned long srcu_gp_seq;		/* Grace-period seq #. */
	unsigned long srcu_gp_seq_needed;	/* Latest gp_seq needed. */
	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
	unsigned long srcu_gp_start;		/* Last GP start timestamp (jiffies). */
	unsigned long srcu_last_gp_end;		/* Last GP end timestamp (ns). */
	unsigned long srcu_size_jiffies;	/* Current contention-measurement interval. */
	unsigned long srcu_n_lock_retries;	/* Contention events in current interval. */
	unsigned long srcu_n_exp_nodelay;	/* # expedited no-delays in current GP phase. */
	bool sda_is_static;			/* May ->sda be passed to free_percpu()? */
	unsigned long srcu_barrier_seq;		/* srcu_barrier seq #. */
	struct mutex srcu_barrier_mutex;	/* Serialize barrier ops. */
	struct completion srcu_barrier_completion;
						/* Awaken barrier rq at end. */
	atomic_t srcu_barrier_cpu_cnt;		/* # CPUs not yet posting a */
						/*  callback for the barrier */
						/*  operation. */
	unsigned long reschedule_jiffies;	/* Timestamp for GP-work rescheduling. */
	unsigned long reschedule_count;		/* # GP-work reschedules at above timestamp. */
	struct delayed_work work;		/* Grace-period processing context. */
	struct srcu_struct *srcu_ssp;		/* Back-pointer to enclosing srcu_struct. */
};
98d8be8173SPaul E. McKenney
/*
 * Per-SRCU-domain structure, similar in function to rcu_state.
 * Kept small so that readers touch only ->srcu_ctrp and ->sda;
 * all update-side state hangs off ->srcu_sup.
 */
struct srcu_struct {
	struct srcu_ctr __percpu *srcu_ctrp;	/* Current reader counter pair. */
	struct srcu_data __percpu *sda;		/* Per-CPU srcu_data array. */
	struct lockdep_map dep_map;
	struct srcu_usage *srcu_sup;		/* Update-side data. */
};
108d8be8173SPaul E. McKenney
// Values for size state variable (->srcu_size_state).  Once the state
// has been set to SRCU_SIZE_ALLOC, the grace-period code advances through
// this state machine one step per grace period until the SRCU_SIZE_BIG state
// is reached.  Otherwise, the state machine remains in the SRCU_SIZE_SMALL
// state indefinitely.
#define SRCU_SIZE_SMALL		0	// No srcu_node combining tree, ->node == NULL.
#define SRCU_SIZE_ALLOC		1	// An srcu_node tree is being allocated, initialized,
					// and then referenced by ->node.  It will not be used.
#define SRCU_SIZE_WAIT_BARRIER	2	// The srcu_node tree starts being used by everything
					// except call_srcu(), especially by srcu_barrier().
					// By the end of this state, all CPUs and threads
					// are aware of this tree's existence.
#define SRCU_SIZE_WAIT_CALL	3	// The srcu_node tree starts being used by call_srcu().
					// By the end of this state, all of the call_srcu()
					// invocations that were running on a non-boot CPU
					// and using the boot CPU's callback queue will have
					// completed.
#define SRCU_SIZE_WAIT_CBS1	4	// Don't trust the ->srcu_have_cbs[] grace-period
#define SRCU_SIZE_WAIT_CBS2	5	// sequence elements or the ->srcu_data_have_cbs[]
#define SRCU_SIZE_WAIT_CBS3	6	// CPU-bitmask elements until all four elements of
#define SRCU_SIZE_WAIT_CBS4	7	// each array have been initialized.
#define SRCU_SIZE_BIG		8	// The srcu_node combining tree is fully initialized
					// and all aspects of it are being put to use.
132994f7068SPaul E. McKenney
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE		0
#define SRCU_STATE_SCAN1	1
#define SRCU_STATE_SCAN2	2

/*
 * Values for initializing gp sequence fields.  Higher values make sequence
 * wraparound occur earlier (useful for exercising wraparound handling).
 * The second value, which includes a state component, is used for static
 * initialization of srcu_usage, where srcu_gp_seq_needed is expected to
 * have some state value in its lower bits (or else the structure would
 * appear to be already initialized to check_init_srcu_struct()).
 */
#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
14829bc83e4SJP Kobryn
/*
 * Static initializer for an srcu_usage structure.  Seeds the grace-period
 * sequence fields with SRCU_GP_SEQ_INITIAL_VAL* so that wraparound is
 * exercised early (see the comment on those macros above).
 */
#define __SRCU_USAGE_INIT(name)									\
{												\
	.lock = __SPIN_LOCK_UNLOCKED(name.lock),						\
	.srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL,							\
	.srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE,				\
	.srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL,					\
	.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0),					\
}
15703200b5cSPaul E. McKenney
/* Initializer fields common to both the MODULE and built-in variants. */
#define __SRCU_STRUCT_INIT_COMMON(name, usage_name)						\
	.srcu_sup = &usage_name,								\
	__SRCU_DEP_MAP_INIT(name)

/* MODULE variant: ->sda is allocated at module-load time, so not set here. */
#define __SRCU_STRUCT_INIT_MODULE(name, usage_name)						\
{												\
	__SRCU_STRUCT_INIT_COMMON(name, usage_name)						\
}

/* Built-in variant: wire up the statically allocated per-CPU srcu_data. */
#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name)						\
{												\
	.sda = &pcpu_name,									\
	.srcu_ctrp = &pcpu_name.srcu_ctrs[0],							\
	__SRCU_STRUCT_INIT_COMMON(name, usage_name)						\
}
173d8be8173SPaul E. McKenney
174d8be8173SPaul E. McKenney /*
175d8be8173SPaul E. McKenney * Define and initialize a srcu struct at build time.
176d8be8173SPaul E. McKenney * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
177d8be8173SPaul E. McKenney *
178d8be8173SPaul E. McKenney * Note that although DEFINE_STATIC_SRCU() hides the name from other
179d8be8173SPaul E. McKenney * files, the per-CPU variable rules nevertheless require that the
180d8be8173SPaul E. McKenney * chosen name be globally unique. These rules also prohibit use of
181d8be8173SPaul E. McKenney * DEFINE_STATIC_SRCU() within a function. If these rules are too
182d8be8173SPaul E. McKenney * restrictive, declare the srcu_struct manually. For example, in
183d8be8173SPaul E. McKenney * each file:
184d8be8173SPaul E. McKenney *
185d8be8173SPaul E. McKenney * static struct srcu_struct my_srcu;
186d8be8173SPaul E. McKenney *
187d8be8173SPaul E. McKenney * Then, before the first use of each my_srcu, manually initialize it:
188d8be8173SPaul E. McKenney *
189d8be8173SPaul E. McKenney * init_srcu_struct(&my_srcu);
190d8be8173SPaul E. McKenney *
191d8be8173SPaul E. McKenney * See include/linux/percpu-defs.h for the rules on per-CPU variables.
192d8be8173SPaul E. McKenney */
#ifdef MODULE
/*
 * MODULE variant: the per-CPU srcu_data is allocated at module-load time,
 * so only the srcu_usage is defined statically.  A pointer to the
 * srcu_struct is placed in the ___srcu_struct_ptrs section so the module
 * loader can find and initialize it.
 */
# define __DEFINE_SRCU(name, is_static)							\
	static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
	is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage); \
	extern struct srcu_struct * const __srcu_struct_##name;				\
	struct srcu_struct * const __srcu_struct_##name					\
		__section("___srcu_struct_ptrs") = &name
#else
/* Built-in variant: everything, including the per-CPU data, is static. */
# define __DEFINE_SRCU(name, is_static)							\
	static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);			\
	static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
	is_static struct srcu_struct name =						\
		__SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data)
#endif
#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
209d8be8173SPaul E. McKenney
/* Update-side and torture-test entry points implemented in kernel/rcu/srcutree.c. */
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
214d8be8173SPaul E. McKenney
215f4bde41dSPaul E. McKenney // Converts a per-CPU pointer to an ->srcu_ctrs[] array element to that
216f4bde41dSPaul E. McKenney // element's index.
__srcu_ptr_to_ctr(struct srcu_struct * ssp,struct srcu_ctr __percpu * scpp)217f4bde41dSPaul E. McKenney static inline bool __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
218f4bde41dSPaul E. McKenney {
219f4bde41dSPaul E. McKenney return scpp - &ssp->sda->srcu_ctrs[0];
220f4bde41dSPaul E. McKenney }
221f4bde41dSPaul E. McKenney
2224937096bSPaul E. McKenney // Converts an integer to a per-CPU pointer to the corresponding
2234937096bSPaul E. McKenney // ->srcu_ctrs[] array element.
__srcu_ctr_to_ptr(struct srcu_struct * ssp,int idx)2244937096bSPaul E. McKenney static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
2254937096bSPaul E. McKenney {
2264937096bSPaul E. McKenney return &ssp->sda->srcu_ctrs[idx];
2274937096bSPaul E. McKenney }
2284937096bSPaul E. McKenney
229bb94b12eSPaul E. McKenney /*
230bb94b12eSPaul E. McKenney * Counts the new reader in the appropriate per-CPU element of the
231c4020620SPaul E. McKenney * srcu_struct. Returns a pointer that must be passed to the matching
232c4020620SPaul E. McKenney * srcu_read_unlock_fast().
233c4020620SPaul E. McKenney *
234*3cec2745SPaul E. McKenney * Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
235*3cec2745SPaul E. McKenney * critical sections either because they disables interrupts, because they
236*3cec2745SPaul E. McKenney * are a single instruction, or because they are a read-modify-write atomic
237*3cec2745SPaul E. McKenney * operation, depending on the whims of the architecture.
238*3cec2745SPaul E. McKenney *
239*3cec2745SPaul E. McKenney * This means that __srcu_read_lock_fast() is not all that fast
240*3cec2745SPaul E. McKenney * on architectures that support NMIs but do not supply NMI-safe
241*3cec2745SPaul E. McKenney * implementations of this_cpu_inc().
242c4020620SPaul E. McKenney */
__srcu_read_lock_fast(struct srcu_struct * ssp)243c4020620SPaul E. McKenney static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
244c4020620SPaul E. McKenney {
245c4020620SPaul E. McKenney struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
246c4020620SPaul E. McKenney
247c4020620SPaul E. McKenney RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast().");
248*3cec2745SPaul E. McKenney if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
249c4020620SPaul E. McKenney this_cpu_inc(scp->srcu_locks.counter); /* Y */
250*3cec2745SPaul E. McKenney else
251*3cec2745SPaul E. McKenney atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); /* Z */
252c4020620SPaul E. McKenney barrier(); /* Avoid leaking the critical section. */
253c4020620SPaul E. McKenney return scp;
254c4020620SPaul E. McKenney }
255c4020620SPaul E. McKenney
256c4020620SPaul E. McKenney /*
257c4020620SPaul E. McKenney * Removes the count for the old reader from the appropriate
258c4020620SPaul E. McKenney * per-CPU element of the srcu_struct. Note that this may well be a
259c4020620SPaul E. McKenney * different CPU than that which was incremented by the corresponding
260c4020620SPaul E. McKenney * srcu_read_lock_fast(), but it must be within the same task.
261c4020620SPaul E. McKenney *
262*3cec2745SPaul E. McKenney * Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
263*3cec2745SPaul E. McKenney * critical sections either because they disables interrupts, because they
264*3cec2745SPaul E. McKenney * are a single instruction, or because they are a read-modify-write atomic
265*3cec2745SPaul E. McKenney * operation, depending on the whims of the architecture.
266*3cec2745SPaul E. McKenney *
267*3cec2745SPaul E. McKenney * This means that __srcu_read_unlock_fast() is not all that fast
268*3cec2745SPaul E. McKenney * on architectures that support NMIs but do not supply NMI-safe
269*3cec2745SPaul E. McKenney * implementations of this_cpu_inc().
270c4020620SPaul E. McKenney */
__srcu_read_unlock_fast(struct srcu_struct * ssp,struct srcu_ctr __percpu * scp)271c4020620SPaul E. McKenney static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
272c4020620SPaul E. McKenney {
273c4020620SPaul E. McKenney barrier(); /* Avoid leaking the critical section. */
274*3cec2745SPaul E. McKenney if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
275c4020620SPaul E. McKenney this_cpu_inc(scp->srcu_unlocks.counter); /* Z */
276*3cec2745SPaul E. McKenney else
277*3cec2745SPaul E. McKenney atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); /* Z */
278c4020620SPaul E. McKenney RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast().");
279c4020620SPaul E. McKenney }
280c4020620SPaul E. McKenney
281c4020620SPaul E. McKenney /*
282c4020620SPaul E. McKenney * Counts the new reader in the appropriate per-CPU element of the
283bb94b12eSPaul E. McKenney * srcu_struct. Returns an index that must be passed to the matching
284bb94b12eSPaul E. McKenney * srcu_read_unlock_lite().
285bb94b12eSPaul E. McKenney *
286bb94b12eSPaul E. McKenney * Note that this_cpu_inc() is an RCU read-side critical section either
287bb94b12eSPaul E. McKenney * because it disables interrupts, because it is a single instruction,
288bb94b12eSPaul E. McKenney * or because it is a read-modify-write atomic operation, depending on
289bb94b12eSPaul E. McKenney * the whims of the architecture.
290bb94b12eSPaul E. McKenney */
__srcu_read_lock_lite(struct srcu_struct * ssp)291bb94b12eSPaul E. McKenney static inline int __srcu_read_lock_lite(struct srcu_struct *ssp)
292bb94b12eSPaul E. McKenney {
293795e7efeSPaul E. McKenney struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
294bb94b12eSPaul E. McKenney
295bb94b12eSPaul E. McKenney RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite().");
296795e7efeSPaul E. McKenney this_cpu_inc(scp->srcu_locks.counter); /* Y */
297bb94b12eSPaul E. McKenney barrier(); /* Avoid leaking the critical section. */
298f4bde41dSPaul E. McKenney return __srcu_ptr_to_ctr(ssp, scp);
299bb94b12eSPaul E. McKenney }
300bb94b12eSPaul E. McKenney
301bb94b12eSPaul E. McKenney /*
302bb94b12eSPaul E. McKenney * Removes the count for the old reader from the appropriate
303bb94b12eSPaul E. McKenney * per-CPU element of the srcu_struct. Note that this may well be a
304bb94b12eSPaul E. McKenney * different CPU than that which was incremented by the corresponding
305bb94b12eSPaul E. McKenney * srcu_read_lock_lite(), but it must be within the same task.
306bb94b12eSPaul E. McKenney *
307bb94b12eSPaul E. McKenney * Note that this_cpu_inc() is an RCU read-side critical section either
308bb94b12eSPaul E. McKenney * because it disables interrupts, because it is a single instruction,
309bb94b12eSPaul E. McKenney * or because it is a read-modify-write atomic operation, depending on
310bb94b12eSPaul E. McKenney * the whims of the architecture.
311bb94b12eSPaul E. McKenney */
__srcu_read_unlock_lite(struct srcu_struct * ssp,int idx)312bb94b12eSPaul E. McKenney static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
313bb94b12eSPaul E. McKenney {
314bb94b12eSPaul E. McKenney barrier(); /* Avoid leaking the critical section. */
3154937096bSPaul E. McKenney this_cpu_inc(__srcu_ctr_to_ptr(ssp, idx)->srcu_unlocks.counter); /* Z */
316bb94b12eSPaul E. McKenney RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite().");
317bb94b12eSPaul E. McKenney }
318bb94b12eSPaul E. McKenney
3199407f5c3SPaul E. McKenney void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
3209407f5c3SPaul E. McKenney
321780818a6SPaul E. McKenney // Record reader usage even for CONFIG_PROVE_RCU=n kernels. This is
322780818a6SPaul E. McKenney // needed only for flavors that require grace-period smp_mb() calls to be
323780818a6SPaul E. McKenney // promoted to synchronize_rcu().
srcu_check_read_flavor_force(struct srcu_struct * ssp,int read_flavor)324780818a6SPaul E. McKenney static inline void srcu_check_read_flavor_force(struct srcu_struct *ssp, int read_flavor)
3259407f5c3SPaul E. McKenney {
3269407f5c3SPaul E. McKenney struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
3279407f5c3SPaul E. McKenney
328780818a6SPaul E. McKenney if (likely(READ_ONCE(sdp->srcu_reader_flavor) & read_flavor))
3299407f5c3SPaul E. McKenney return;
3309407f5c3SPaul E. McKenney
331cfb07b07SPaul E. McKenney // Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered.
332780818a6SPaul E. McKenney __srcu_check_read_flavor(ssp, read_flavor);
3339407f5c3SPaul E. McKenney }
3349407f5c3SPaul E. McKenney
3359407f5c3SPaul E. McKenney // Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels.
srcu_check_read_flavor(struct srcu_struct * ssp,int read_flavor)3369407f5c3SPaul E. McKenney static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
3379407f5c3SPaul E. McKenney {
3389407f5c3SPaul E. McKenney if (IS_ENABLED(CONFIG_PROVE_RCU))
3399407f5c3SPaul E. McKenney __srcu_check_read_flavor(ssp, read_flavor);
3409407f5c3SPaul E. McKenney }
3419407f5c3SPaul E. McKenney
342d8be8173SPaul E. McKenney #endif
343