// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) Unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock() #3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair, but writers, which should be avoided in
 * RT tasks anyway (think mmap_sem), are subject to the rtmutex priority/DL
 * inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one by one reader boosting/handover mechanism is a
 * major surgery for a very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
 * semantics for rwbase_rt. The atomic ops therefore have to provide
 * _acquire() and _release() (or stronger) ordering themselves.
 *
 * Common code shared between RT rw_semaphore and rwlock
 */
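
/*
 * The value of rwb->readers encodes the lock state. A sketch of the
 * encoding as it follows from the operations below (READER_BIAS keeps the
 * value negative, see rwbase_read_trylock()):
 *
 *   READER_BIAS      - unlocked, reader fast path enabled
 *   READER_BIAS + n  - n readers hold the lock, no writer pending
 *   n (n >= 0)       - a writer removed the BIAS; n readers are still
 *                      inside their critical section and block the writer
 *   WRITER_BIAS      - write locked
 *
 * The rwbase_rtmutex_*() and other rwbase_*() helpers used here are not
 * defined in this file; they are provided by the lock implementation (the
 * RT variants of rw_semaphore and rwlock) which includes it.
 */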

static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment the reader count if rwb->readers < 0, i.e. READER_BIAS
	 * is set.
	 */
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}

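/*
 * Read lock slow path, taken when the reader fast path fails because a
 * writer has removed the reader BIAS. The reader acquires the rtmutex
 * (blocking on it, and thereby boosting the writer, if it is held),
 * increments the reader count and drops the rtmutex again.
 */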
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	DEFINE_WAKE_Q(wake_q);
	int ret;

	rwbase_pre_schedule();
	raw_spin_lock_irq(&rtm->wait_lock);

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock
	 * held, so this can't result in the following race:
	 *
	 * Reader1              Reader2                 Writer
	 *                      down_read()
	 *                                              down_write()
	 *                                              rtmutex_lock(m)
	 *                                              wait()
	 * down_read()
	 * unlock(m->wait_lock)
	 *                      up_read()
	 *                      wake(Writer)
	 *                                              lock(m->wait_lock)
	 *                                              sem->writelocked=true
	 *                                              unlock(m->wait_lock)
	 *
	 *                                              up_write()
	 *                                              sem->writelocked=false
	 *                                              rtmutex_unlock(m)
	 *                      down_read()
	 *                                              down_write()
	 *                                              rtmutex_lock(m)
	 *                                              wait()
	 * rtmutex_lock(m)
	 *
	 * That would put Reader1 behind the writer waiting on
	 * Reader2 to call up_read(), which might be unbounded.
	 */

	trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

	/*
	 * For rwlocks this returns 0 unconditionally, so the below
	 * !ret conditionals are optimized out.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state, &wake_q);

	/*
	 * On success the rtmutex is held, so there can't be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again.
	 *
	 * rtmutex->wait_lock has to be unlocked in any case of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);

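	/*
	 * Wakeups collected in the wake_q by rwbase_rtmutex_slowlock_locked()
	 * are issued only after rtm->wait_lock has been dropped; preemption
	 * stays disabled across the unlock/wake_up_q() pair.
	 */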
	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	wake_up_q(&wake_q);
	preempt_enable();

	if (!ret)
		rwbase_rtmutex_unlock(rtm);

	trace_contention_end(rwb, ret);
	rwbase_post_schedule();
	return ret;
}

static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	lockdep_assert(!current->pi_blocked_on);

	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}

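/*
 * Read unlock slow path, called by the reader which decremented
 * rwb->readers to zero, i.e. the last reader leaving the critical section
 * while a writer is waiting for it in rwbase_write_lock().
 */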
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;
	DEFINE_RT_WAKE_Q(wqh);

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case which can happen is a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		rt_mutex_wake_q_add_task(&wqh, owner, state);

	/* Pairs with the preempt_enable() in rt_mutex_wake_up_q() */
	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 *
	 * dec_and_test() is fully ordered, provides RELEASE.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}

static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	/*
	 * _release() is needed in case a reader is in the fast path, pairing
	 * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
	 */
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

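/*
 * Downgrade a held write lock to a read lock. Passing WRITER_BIAS - 1
 * instead of WRITER_BIAS to __rwbase_write_unlock() re-establishes the
 * reader BIAS with the reader count already at one, which accounts the
 * current task as a reader, so concurrent writers keep waiting.
 */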
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
	/* Can do without CAS because we're serialized by wait_lock. */
	lockdep_assert_held(&rwb->rtmutex.wait_lock);

	/*
	 * _acquire is needed in case the reader is in the fast path, pairing
	 * with rwbase_read_unlock(), provides ACQUIRE.
	 */
	if (!atomic_read_acquire(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		return 1;
	}

	return 0;
}

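/*
 * Write lock slow path, implementing steps 1) to 4) of the
 * down_write()/write_lock() scheme described at the top of this file:
 * take the rtmutex, remove the reader BIAS and then wait in the loop
 * below until the last active reader has left the critical section.
 */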
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into slow path */
	atomic_sub(READER_BIAS, &rwb->readers);

	rwbase_pre_schedule();

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb))
		goto out_unlock;

	rwbase_set_and_save_current_state(state);
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
	for (;;) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			rwbase_restore_current_state();
			__rwbase_write_unlock(rwb, 0, flags);
			rwbase_post_schedule();
			trace_contention_end(rwb, -EINTR);
			return -EINTR;
		}

		if (__rwbase_write_trylock(rwb))
			break;

		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		rwbase_schedule();
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);

		set_current_state(state);
	}
	rwbase_restore_current_state();
	trace_contention_end(rwb, 0);

out_unlock:
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_post_schedule();
	return 0;
}

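/*
 * Write trylock: must not block, so this can only succeed if the rtmutex
 * can be taken without waiting and no reader is active. On failure the
 * reader BIAS is restored via __rwbase_write_unlock(rwb, 0, flags) before
 * returning 0.
 */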
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb)) {
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}