xref: /linux-6.15/kernel/locking/percpu-rwsem.c (revision 75ff6457)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

#include "rwsem.h"

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	rcu_sync_init(&sem->rss);
	init_rwsem(&sem->rw_sem);
	rcuwait_init(&sem->writer);
	sem->readers_block = 0;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

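/*
 * Example (an illustrative sketch; my_sem, my_setup and my_teardown are
 * made-up names): dynamic users pair percpu_init_rwsem() from
 * <linux/percpu-rwsem.h>, which supplies the lock_class_key that
 * __percpu_init_rwsem() above wants, with percpu_free_rwsem(). Static
 * instances can use DEFINE_STATIC_PERCPU_RWSEM() instead.
 */
static struct percpu_rw_semaphore my_sem;

static int my_setup(void)
{
	/* Fails only if allocating the per-CPU read_count fails. */
	return percpu_init_rwsem(&my_sem);
}

static void my_teardown(void)
{
	/* Callers must guarantee no readers or writers remain. */
	percpu_free_rwsem(&my_sem);
}
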
static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	__this_cpu_inc(*sem->read_count);

	/*
	 * Because the caller holds preemption disabled, the decrement below
	 * happens on the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of readers_block, then
	 * the writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the readers_block value,
	 * which in turn means that they are guaranteed to immediately
	 * decrement their sem->read_count, so that it doesn't matter that the
	 * writer missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !readers_block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!smp_load_acquire(&sem->readers_block)))
		return true;

	__this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}

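/*
 * Illustration (a self-contained userspace C11 analogue with made-up
 * demo_* names, not kernel code): the "A matches D" pairing above is the
 * classic store-buffering pattern. The per-CPU counters are collapsed into
 * one atomic; with both fences in place, at least one side is guaranteed
 * to see the other's store, so a reader and a writer can never both take
 * their fast paths at the same time.
 */
#include <stdatomic.h>

static _Atomic int demo_read_count;	/* stands in for the per-CPU counters */
static _Atomic int demo_readers_block;

static int demo_reader_fast_path(void)	/* mirrors __percpu_down_read_trylock() */
{
	atomic_fetch_add(&demo_read_count, 1);		/* like __this_cpu_inc() */
	atomic_thread_fence(memory_order_seq_cst);	/* A matches D */
	if (!atomic_load(&demo_readers_block))
		return 1;				/* read lock acquired */
	atomic_fetch_sub(&demo_read_count, 1);		/* undo, as above */
	return 0;
}

static int demo_writer_check(void)	/* mirrors percpu_down_write() */
{
	atomic_store(&demo_readers_block, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* D matches A */
	return atomic_load(&demo_read_count) == 0;	/* like per_cpu_sum() */
}
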
bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	/*
	 * We either call schedule() in the wait, or we'll fall through
	 * and reschedule on the preempt_enable() in percpu_down_read().
	 */
	preempt_enable_no_resched();

	/*
	 * Avoid lockdep annotations for the down/up_read() here; the
	 * acquisition is already annotated via sem->dep_map by our caller.
	 */
	__down_read(&sem->rw_sem);
	this_cpu_inc(*sem->read_count);
	__up_read(&sem->rw_sem);

	preempt_disable();
	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);

void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
	smp_mb(); /* B matches C */
	/*
	 * In other words, if the writer sees our decrement (presumably to an
	 * aggregate of zero, as that is the only time it matters) it will
	 * also see our critical section.
	 */
	__this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);
}
EXPORT_SYMBOL_GPL(__percpu_up_read);

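/*
 * Example (sketch, reusing the hypothetical my_sem above): these slow
 * paths back the percpu_down_read()/percpu_up_read() inlines in
 * <linux/percpu-rwsem.h>, which handle preemption and lockdep before
 * falling back here.
 */
static void my_reader(void)
{
	percpu_down_read(&my_sem);	/* usually just a per-CPU increment */
	/* read-side critical section; percpu_down_write() is excluded */
	percpu_up_read(&my_sem);	/* usually just a per-CPU decrement */
}
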
#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable because any newly arriving
 * readers that increment a given counter will immediately decrement that
 * same counter.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

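/*
 * Illustration (userspace sketch with made-up names, assuming four
 * possible CPUs): a reader can acquire on one CPU and, after migrating,
 * release on another, leaving e.g. read_count = { 1, -1, 0, 0 }. The
 * individual counters are meaningless; only their modular sum is, which
 * is all readers_active_check() looks at.
 */
static int demo_counts[4] = { 1, -1, 0, 0 };

static int demo_sum_is_zero(void)	/* mirrors the per_cpu_sum() check */
{
	int cpu, sum = 0;

	for (cpu = 0; cpu < 4; cpu++)
		sum += demo_counts[cpu];
	return sum == 0;		/* true: no reader is still active */
}
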
void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	__down_write(&sem->rw_sem);

	/*
	 * Notify new readers to block; up until now, and thus throughout the
	 * longish rcu_sync_enter() above, new readers could still come in.
	 */
	WRITE_ONCE(sem->readers_block, 1);

	smp_mb(); /* D matches A */

	/*
	 * If the readers don't see our write of readers_block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all now active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

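/*
 * Example (sketch, continuing the hypothetical my_sem): the writer side
 * is deliberately expensive; percpu_down_write() pays the rcu_sync grace
 * period and waits out every active reader, in exchange for the nearly
 * free reader fast path.
 */
static void my_writer(void)
{
	percpu_down_write(&my_sem);	/* blocks until all readers are done */
	/* writer-side critical section; all readers are excluded */
	percpu_up_write(&my_sem);
}
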
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal that the writer is done; there is no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path, which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	smp_store_release(&sem->readers_block, 0);

	/*
	 * Release the write lock; this will allow readers back in the game.
	 */
	__up_write(&sem->rw_sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it is counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
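
/*
 * Illustration (userspace C11 analogue with made-up demo_* names, reusing
 * <stdatomic.h> from the sketch above): the release store in
 * percpu_up_write() pairs with the acquire load in
 * __percpu_down_read_trylock(). A reader that observes readers_block == 0
 * therefore also observes everything the writer wrote before unlocking.
 */
static _Atomic int demo_block = 1;	/* writer currently holds the lock */
static int demo_data;

static void demo_writer_unlock(void)
{
	demo_data = 42;			/* writer's critical section */
	/* Publishes demo_data to any reader that sees the 0. */
	atomic_store_explicit(&demo_block, 0, memory_order_release);
}

static int demo_reader_peek(void)
{
	/* Pairs with the release store above. */
	if (!atomic_load_explicit(&demo_block, memory_order_acquire))
		return demo_data;	/* guaranteed to read 42 */
	return -1;			/* writer still holds the lock */
}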
213