// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non-RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wakeup is
 *   missed.
 *
 * - Non-RT spin/rwlocks disable preemption and possibly also interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods from completing.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock-held section.
 */
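
/*
 * Rough sketch of the resulting behaviour (illustration only, not part of
 * the implementation): a spin_lock()/spin_unlock() section on RT acts
 * roughly like
 *
 *        spin_lock(&lock);       // may block on the rtmutex; task state is
 *                                // saved/restored, then rcu_read_lock() and
 *                                // migrate_disable()
 *        ... critical section ...  // preemptible, but pinned to the CPU and
 *                                   // inside an RCU read side section
 *        spin_unlock(&lock);     // migrate_enable(), rcu_read_unlock(),
 *                                // then the rtmutex is released
 *
 * while the !RT implementation disables preemption (and possibly
 * interrupts) across the same section instead.
 */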
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS \
        (rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched() \
        __might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
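
/*
 * Example of the nesting this accounts for (sketch only):
 *
 *        rcu_read_lock();
 *        spin_lock(&lock);       // legitimate on RT, rcu_preempt_depth() == 1
 *        ...
 *        spin_unlock(&lock);
 *        rcu_read_unlock();
 *
 * The current RCU nesting depth is passed as the expected offset so that
 * __might_resched() does not complain about this pattern.
 */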

static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
        lockdep_assert(!current->pi_blocked_on);

        if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
                rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
        rtlock_might_resched();
        rtlock_lock(&lock->lock);
        rcu_read_lock();
        migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock) __acquires(RCU)
{
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        __rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        __rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
                                    struct lockdep_map *nest_lock)
{
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        __rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock) __releases(RCU)
{
        spin_release(&lock->dep_map, _RET_IP_);
        migrate_enable();
        rcu_read_unlock();

        if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
                rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
        spin_lock(lock);
        spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);

static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
        int ret = 1;

        if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
                ret = rt_mutex_slowtrylock(&lock->lock);

        if (ret) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                rcu_read_lock();
                migrate_disable();
        }
        return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
        return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
        int ret;

        local_bh_disable();
        ret = __rt_spin_trylock(lock);
        if (!ret)
                local_bh_enable();
        return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);
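
/*
 * Typical caller pattern (sketch, via the spin_trylock_bh()/spin_unlock_bh()
 * wrappers):
 *
 *        if (spin_trylock_bh(&lock)) {
 *                ...                     // lock held, bottom halves disabled
 *                spin_unlock_bh(&lock);
 *        }
 *
 * On failure bottom halves are re-enabled before returning, so the caller
 * has nothing to undo.
 */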

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                         struct lock_class_key *key, bool percpu)
{
        u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
                              LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
#define rwbase_set_and_save_current_state(state) \
        current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state() \
        current_restore_rtlock_saved_state()

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
        if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
                rtlock_slowlock(rtm);
        return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state,
                               struct wake_q_head *wake_q)
{
        rtlock_slowlock_locked(rtm, wake_q);
        return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
        if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
                return;

        rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
        if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
                return 1;

        return rt_mutex_slowtrylock(rtm);
}

#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_pre_schedule()

#define rwbase_schedule() \
        schedule_rtlock()

#define rwbase_post_schedule()
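
/*
 * The definitions above supply the rtlock flavour of the rwbase_rt
 * template: lock waits are not interruptible (signal_pending is hard-wired
 * to 0), waiters block via schedule_rtlock(), and the task state is
 * saved/restored with the rtlock wait state helpers.
 */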

#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
        int ret;

        ret = rwbase_read_trylock(&rwlock->rwbase);
        if (ret) {
                rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
                rcu_read_lock();
                migrate_disable();
        }
        return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
        int ret;

        ret = rwbase_write_trylock(&rwlock->rwbase);
        if (ret) {
                rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
                rcu_read_lock();
                migrate_disable();
        }
        return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

void __sched rt_read_lock(rwlock_t *rwlock) __acquires(RCU)
{
        rtlock_might_resched();
        rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
        rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
        rcu_read_lock();
        migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock) __acquires(RCU)
{
        rtlock_might_resched();
        rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
        rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
        rcu_read_lock();
        migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(RCU)
{
        rtlock_might_resched();
        rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
        rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
        rcu_read_lock();
        migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

void __sched rt_read_unlock(rwlock_t *rwlock) __releases(RCU)
{
        rwlock_release(&rwlock->dep_map, _RET_IP_);
        migrate_enable();
        rcu_read_unlock();
        rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock) __releases(RCU)
{
        rwlock_release(&rwlock->dep_map, _RET_IP_);
        rcu_read_unlock();
        migrate_enable();
        rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
                      struct lock_class_key *key)
{
        debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
        lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif