Lines matching refs: lock (identifier cross-reference, kernel/locking/rtmutex.c)

37 					struct rt_mutex *lock,  in __ww_mutex_add_waiter()  argument
44 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
50 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
55 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
96 rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_owner_encode() argument
100 if (rt_mutex_has_waiters(lock)) in rt_mutex_owner_encode()
107 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
113 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); in rt_mutex_set_owner()
116 static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock) in rt_mutex_clear_owner() argument
119 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); in rt_mutex_clear_owner()
122 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) in clear_rt_mutex_waiters() argument
124 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
125 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
129 fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock) in fixup_rt_mutex_waiters() argument
131 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
133 if (rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
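
The owner-encoding fragments above all rest on one trick: the RT_MUTEX_HAS_WAITERS flag lives in bit 0 of the owner task_struct pointer, so a single word records both who holds the lock and whether anyone is queued behind it. A minimal user-space model of that tagging scheme (struct task, struct rtmb and the helper names are invented for illustration, not kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL /* bit 0 of the owner word, as with RT_MUTEX_HAS_WAITERS */

struct task { int prio; };

/* One word encodes both the owner pointer and the waiters flag; this
 * works because task pointers are always more than 1-byte aligned. */
struct rtmb { uintptr_t owner; };

static uintptr_t encode_owner(const struct rtmb *m, struct task *owner)
{
        uintptr_t val = (uintptr_t)owner;

        /* Preserve the waiters bit across owner updates. */
        if (m->owner & HAS_WAITERS)
                val |= HAS_WAITERS;
        return val;
}

static struct task *decode_owner(const struct rtmb *m)
{
        return (struct task *)(m->owner & ~HAS_WAITERS);
}

int main(void)
{
        struct task t = { .prio = 10 };
        struct rtmb m = { .owner = 0 };

        m.owner = encode_owner(&m, &t);  /* take ownership, no waiters yet */
        m.owner |= HAS_WAITERS;          /* a waiter enqueues itself */
        m.owner = encode_owner(&m, &t);  /* re-encoding keeps the flag */
        assert(decode_owner(&m) == &t && (m.owner & HAS_WAITERS));
        printf("owner prio %d, waiters pending\n", decode_owner(&m)->prio);
        return 0;
}

fixup_rt_mutex_waiters() exists because this shared bit can go stale: after a wakeup the bit may still be set with no waiter actually queued, and clearing it has to be done carefully against concurrent fast-path cmpxchg attempts.
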
217 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
221 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
224 static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) in rt_mutex_try_acquire() argument
226 return rt_mutex_cmpxchg_acquire(lock, NULL, current); in rt_mutex_try_acquire()
229 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
233 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
241 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
243 unsigned long *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
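
rt_mutex_cmpxchg_acquire(), rt_mutex_try_acquire() and mark_rt_mutex_waiters() form the lock-free fast path: ownership is taken by swinging the owner word from NULL to current with acquire semantics, and contention is made visible by OR-ing the waiters bit in with a cmpxchg loop. A rough C11 equivalent, with invented names; the kernel's try_cmpxchg_acquire()/cmpxchg_relaxed() map loosely onto the explicit compare-exchange calls here:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

struct rtmb { _Atomic uintptr_t owner; };

/* Fast-path acquire: swing the owner word NULL -> me; acquire ordering
 * on success pairs with the release ordering of the unlock path. */
static bool try_acquire(struct rtmb *m, void *me)
{
        uintptr_t expected = 0;

        return atomic_compare_exchange_strong_explicit(&m->owner, &expected,
                                                       (uintptr_t)me,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

/* Make contention visible: OR the waiters bit into whatever owner word
 * is current, mirroring the cmpxchg loop of mark_rt_mutex_waiters(). */
static void mark_waiters(struct rtmb *m)
{
        uintptr_t old = atomic_load_explicit(&m->owner, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(&m->owner, &old,
                                                      old | HAS_WAITERS,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                ;       /* 'old' was reloaded on failure; retry with it */
}

int main(void)
{
        static int me;
        struct rtmb m = { .owner = 0 };

        if (try_acquire(&m, &me))
                mark_waiters(&m);       /* as a contending task would */
        return 0;
}
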
265 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
267 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
269 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
271 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
272 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
297 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
301 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
309 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
311 static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) in rt_mutex_try_acquire() argument
320 return rt_mutex_slowtrylock(lock); in rt_mutex_try_acquire()
323 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
330 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
332 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
333 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
339 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
341 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
343 lock->owner = NULL; in unlock_rt_mutex_safe()
344 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
365 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_update_prio()
378 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_clone_prio()
480 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
482 lockdep_assert_held(&lock->wait_lock); in rt_mutex_enqueue()
484 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
488 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
490 lockdep_assert_held(&lock->wait_lock); in rt_mutex_dequeue()
495 rb_erase_cached(&waiter->tree.entry, &lock->waiters); in rt_mutex_dequeue()
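
rt_mutex_enqueue()/rt_mutex_dequeue() keep the waiters in a cached rbtree, so the leftmost (highest-priority) waiter is reachable in O(1) without walking the tree. A kernel-context sketch of the same pattern, assuming <linux/rbtree.h> and a kernel build environment; struct waiter here is a simplification, not the kernel's struct rt_mutex_waiter, and the real comparator also orders by deadline:

#include <linux/rbtree.h>
#include <linux/types.h>

struct waiter {
        struct rb_node node;
        int prio;               /* lower value = higher priority */
};

static bool waiter_less(struct rb_node *a, const struct rb_node *b)
{
        const struct waiter *wa = rb_entry(a, struct waiter, node);
        const struct waiter *wb = rb_entry((struct rb_node *)b, struct waiter, node);

        return wa->prio < wb->prio;
}

/* Insert in priority order; the tree caches the leftmost node. */
static void enqueue(struct rb_root_cached *tree, struct waiter *w)
{
        rb_add_cached(&w->node, tree, waiter_less);
}

static void dequeue(struct rb_root_cached *tree, struct waiter *w)
{
        rb_erase_cached(&w->node, tree);
        RB_CLEAR_NODE(&w->node);
}

/* Top waiter == cached leftmost node, an O(1) lookup. */
static struct waiter *top_waiter(struct rb_root_cached *tree)
{
        struct rb_node *n = rb_first_cached(tree);

        return n ? rb_entry(n, struct waiter, node) : NULL;
}
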
527 static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, in rt_mutex_adjust_prio() argument
532 lockdep_assert_held(&lock->wait_lock); in rt_mutex_adjust_prio()
533 lockdep_assert(rt_mutex_owner(lock) == p); in rt_mutex_adjust_prio()
602 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
688 struct rt_mutex_base *lock; in rt_mutex_adjust_prio_chain() local
767 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
835 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
845 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
860 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
875 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
896 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
897 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
902 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
915 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
919 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
932 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
935 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
950 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
969 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
975 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
978 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
988 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
992 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
1002 rt_mutex_adjust_prio(lock, task); in rt_mutex_adjust_prio_chain()
1016 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
1019 rt_mutex_adjust_prio(lock, task); in rt_mutex_adjust_prio_chain()
1042 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
1046 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
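
The rt_mutex_adjust_prio_chain() references above are the heart of priority inheritance: walk task -> blocked-on lock -> lock owner, re-sorting the waiter queues and boosting the owner at each hop, with the wait_lock trylock/retry dance keeping the walk itself deadlock-free. The model below shows only the propagation step, single-threaded and with invented types; the real walk additionally detects deadlocks (the lock == orig_lock check) and drops and revalidates locks at every hop:

#include <stddef.h>
#include <stdio.h>

struct lock;

struct task {
        int prio;               /* effective priority; lower = higher */
        struct lock *blocked_on;
};

struct lock {
        struct task *owner;
};

/*
 * Follow the chain task -> lock -> owner, propagating the boosted
 * priority until a task is not blocked or the priorities already agree.
 */
static void adjust_prio_chain(struct task *t)
{
        while (t->blocked_on) {
                struct task *owner = t->blocked_on->owner;

                if (!owner || owner->prio <= t->prio)
                        break;          /* nothing left to boost */
                owner->prio = t->prio;  /* inherit the waiter's priority */
                t = owner;              /* continue at the next link */
        }
}

int main(void)
{
        struct lock l1 = {0}, l2 = {0};
        struct task low = { .prio = 30 }, mid = { .prio = 20 }, high = { .prio = 5 };

        l1.owner = &low;  mid.blocked_on = &l1;   /* mid waits on low  */
        l2.owner = &mid;  high.blocked_on = &l2;  /* high waits on mid */

        adjust_prio_chain(&high);
        printf("mid=%d low=%d\n", mid.prio, low.prio);  /* both boosted to 5 */
        return 0;
}
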
1087 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
1090 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
1109 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
1114 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
1123 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); in try_to_take_rt_mutex()
1134 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
1147 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
1150 rt_mutex_top_waiter(lock))) in try_to_take_rt_mutex()
1182 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
1183 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
1191 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
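
try_to_take_rt_mutex() boils down to a small decision: an owned lock can never be taken, and a contended one only by its top waiter or by a task that outranks the top waiter. A condensed restatement of that check, with invented types and the kernel convention that a lower numeric value means higher priority:

#include <stdbool.h>
#include <stddef.h>

struct waiter { int prio; };

struct lockmodel {
        void *owner;
        struct waiter *top;     /* highest-priority queued waiter, if any */
};

static bool can_take(struct lockmodel *l, struct waiter *me, int my_prio)
{
        if (l->owner)
                return false;           /* somebody holds it: must block */
        if (!l->top)
                return true;            /* free and uncontended */
        if (me == l->top)
                return true;            /* we are the top waiter ourselves */
        return my_prio < l->top->prio;  /* or we outrank the top waiter */
}

On success the fragments above also dequeue the winner, move the remaining top waiter into the new owner's pi-waiters tree, and finally call rt_mutex_set_owner().
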
1203 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, in task_blocks_on_rt_mutex() argument
1210 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
1215 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1234 waiter->lock = lock; in task_blocks_on_rt_mutex()
1239 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
1240 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
1241 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1251 rtm = container_of(lock, struct rt_mutex, rtmutex); in task_blocks_on_rt_mutex()
1255 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1266 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1270 rt_mutex_adjust_prio(lock, owner); in task_blocks_on_rt_mutex()
1296 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in task_blocks_on_rt_mutex()
1298 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
1301 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
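
task_blocks_on_rt_mutex() is the blocking setup: record which lock the waiter sleeps on (waiter->lock, pi_blocked_on), enqueue it, and if it became the top waiter, boost the owner right away; only when the owner is itself blocked does the full chain walk above need to run, which is why wait_lock is dropped around rt_mutex_adjust_prio_chain() and retaken afterwards. A compact model of that setup with invented types:

#include <stddef.h>

struct lock;

struct waiter {
        struct lock *lock;      /* which lock this waiter sleeps on */
        int prio;
};

struct task {
        int prio;
        struct lock *blocked_on;
};

struct lock {
        struct task *owner;
        struct waiter *top;     /* highest-priority waiter */
};

static void block_on(struct lock *l, struct waiter *w, struct task *me)
{
        w->lock = l;
        me->blocked_on = l;
        if (!l->top || w->prio < l->top->prio)
                l->top = w;                     /* we are the new top waiter */
        if (l->top == w && l->owner && l->owner->prio > w->prio)
                l->owner->prio = w->prio;       /* immediate boost of the owner */
}
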
1313 struct rt_mutex_base *lock) in mark_wakeup_next_waiter() argument
1317 lockdep_assert_held(&lock->wait_lock); in mark_wakeup_next_waiter()
1321 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1331 rt_mutex_adjust_prio(lock, current); in mark_wakeup_next_waiter()
1341 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1358 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) in __rt_mutex_slowtrylock() argument
1360 int ret = try_to_take_rt_mutex(lock, current, NULL); in __rt_mutex_slowtrylock()
1366 fixup_rt_mutex_waiters(lock, true); in __rt_mutex_slowtrylock()
1374 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) in rt_mutex_slowtrylock() argument
1384 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1391 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1393 ret = __rt_mutex_slowtrylock(lock); in rt_mutex_slowtrylock()
1395 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1400 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) in __rt_mutex_trylock() argument
1402 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_trylock()
1405 return rt_mutex_slowtrylock(lock); in __rt_mutex_trylock()
1411 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) in rt_mutex_slowunlock() argument
1417 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1419 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1452 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1454 if (unlock_rt_mutex_safe(lock, flags) == true) in rt_mutex_slowunlock()
1457 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1466 mark_wakeup_next_waiter(&wqh, lock); in rt_mutex_slowunlock()
1467 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1472 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) in __rt_mutex_unlock() argument
1474 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) in __rt_mutex_unlock()
1477 rt_mutex_slowunlock(lock); in __rt_mutex_unlock()
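
The unlock references show the mirror-image race: with no waiters queued, rt_mutex_slowunlock() cannot simply store NULL, because a new waiter may set HAS_WAITERS between the check and the store. unlock_rt_mutex_safe() therefore clears the bit under wait_lock, drops the lock, and only then attempts a release-cmpxchg of the bare owner pointer, looping back if it fails; mark_wakeup_next_waiter() above similarly leaves the word as just RT_MUTEX_HAS_WAITERS so the woken task must still claim ownership itself. The failing-exchange part, modelled in C11 with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

struct rtmb { _Atomic uintptr_t owner; };

/*
 * Release with no waiters seen: swing owner -> NULL, but only if the
 * word still holds the bare owner pointer. If a waiter raced in and
 * set HAS_WAITERS after we looked, the exchange fails and the caller
 * must go back to the slow path (wait_lock plus wakeup).
 */
static bool unlock_fast(struct rtmb *m, void *me)
{
        uintptr_t expected = (uintptr_t)me;     /* bare pointer, bit 0 clear */

        return atomic_compare_exchange_strong_explicit(&m->owner, &expected, 0,
                                                       memory_order_release,
                                                       memory_order_relaxed);
}

int main(void)
{
        static int me;
        struct rtmb m = { .owner = (uintptr_t)&me };

        return unlock_fast(&m, &me) ? 0 : 1;    /* succeeds: no waiters bit */
}
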
1481 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
1490 if (owner != rt_mutex_owner(lock)) in rtmutex_spin_on_owner()
1508 !rt_mutex_waiter_is_top_waiter(lock, waiter)) { in rtmutex_spin_on_owner()
1518 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
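
rtmutex_spin_on_owner() is optimistic spinning: burning cycles only pays while the current owner is actually running on another CPU and this task remains the top waiter; otherwise the waiter should block. The second, fallback definition (line 1518) covers builds without spinning support and simply declines to spin. A user-space sketch of the heuristic, with an atomic flag standing in for task_struct::on_cpu:

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct task { _Atomic bool on_cpu; };   /* stand-in for task_struct::on_cpu */
struct rtmb { _Atomic(struct task *) owner; };

/*
 * Keep spinning only while the same owner still holds the lock and is
 * running; return false to tell the caller to block instead. The
 * kernel version also bails out when the spinner stops being the top
 * waiter or needs to reschedule.
 */
static bool spin_on_owner(struct rtmb *m, struct task *owner)
{
        while (atomic_load(&m->owner) == owner) {
                if (!atomic_load(&owner->on_cpu))
                        return false;   /* owner was preempted: sleep instead */
                sched_yield();          /* loose stand-in for cpu_relax() */
        }
        return true;                    /* owner changed: retry the acquire */
}
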
1539 static void __sched remove_waiter(struct rt_mutex_base *lock, in remove_waiter() argument
1542 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1543 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1546 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1549 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1564 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1565 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1567 rt_mutex_adjust_prio(lock, owner); in remove_waiter()
1584 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1586 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1589 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
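
remove_waiter() is the cleanup when a waiter gives up (signal, timeout, deadlock): dequeue it, then recompute the owner's effective priority from whatever top waiter remains, so boosts are dropped as soon as they stop being justified; if the owner is itself blocked, a minimal chain walk propagates the deboost further. The recomputation step, modelled with invented types (lower value = higher priority):

#include <stddef.h>

struct waiter { int prio; };

struct task {
        int base_prio;  /* priority without any inheritance */
        int prio;       /* effective (possibly boosted) priority */
};

struct lock {
        struct task *owner;
        struct waiter *top;     /* highest-priority remaining waiter */
};

/*
 * Once a waiter leaves, the owner's effective priority becomes the
 * higher of its own base priority and its remaining top waiter's.
 */
static void readjust_owner_prio(struct lock *l)
{
        struct task *o = l->owner;

        if (!o)
                return;
        o->prio = o->base_prio;
        if (l->top && l->top->prio < o->prio)
                o->prio = l->top->prio; /* still inheriting from someone */
}
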
1604 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, in rt_mutex_slowlock_block() argument
1610 __releases(&lock->wait_lock) __acquires(&lock->wait_lock) in rt_mutex_slowlock_block()
1612 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in rt_mutex_slowlock_block()
1619 if (try_to_take_rt_mutex(lock, current, waiter)) { in rt_mutex_slowlock_block()
1639 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1640 owner = rt_mutex_owner(lock); in rt_mutex_slowlock_block()
1643 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in rt_mutex_slowlock_block()
1645 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) { in rt_mutex_slowlock_block()
1650 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
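
rt_mutex_slowlock_block() is the wait loop proper: try to take the lock, check for signals and timeouts, drop wait_lock, optionally spin on the owner, otherwise schedule, retake wait_lock, repeat. A loose pthread analogue of that take/check/sleep cycle, minus the priority machinery and the adaptive spin; pthread_cond_wait() plays the role of dropping wait_lock around schedule():

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct slowlock {
        pthread_mutex_t wait_lock;
        pthread_cond_t wake;
        bool held;
};

/*
 * Block until the lock can be taken, rechecking after every wakeup.
 * should_abort() stands in for the signal/timeout checks.
 */
static int slowlock_block(struct slowlock *l, bool (*should_abort)(void))
{
        int ret = 0;

        pthread_mutex_lock(&l->wait_lock);
        for (;;) {
                if (!l->held) {         /* the try-to-take step succeeded */
                        l->held = true;
                        break;
                }
                if (should_abort && should_abort()) {
                        ret = -EINTR;   /* signal/timeout exit path */
                        break;
                }
                /* drops wait_lock while sleeping, retakes it on wakeup */
                pthread_cond_wait(&l->wake, &l->wait_lock);
        }
        pthread_mutex_unlock(&l->wait_lock);
        return ret;
}

A static instance can be initialised with PTHREAD_MUTEX_INITIALIZER and PTHREAD_COND_INITIALIZER; the unlock side signals l->wake after clearing held.
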
1659 struct rt_mutex_base *lock, in rt_mutex_handle_deadlock() argument
1672 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_handle_deadlock()
1691 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, in __rt_mutex_slowlock() argument
1698 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in __rt_mutex_slowlock()
1702 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1706 if (try_to_take_rt_mutex(lock, current, NULL)) { in __rt_mutex_slowlock()
1717 trace_contention_begin(lock, LCB_F_RT); in __rt_mutex_slowlock()
1719 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q); in __rt_mutex_slowlock()
1721 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q); in __rt_mutex_slowlock()
1733 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1734 rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); in __rt_mutex_slowlock()
1742 fixup_rt_mutex_waiters(lock, true); in __rt_mutex_slowlock()
1744 trace_contention_end(lock, ret); in __rt_mutex_slowlock()
1749 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, in __rt_mutex_slowlock_locked() argument
1760 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, in __rt_mutex_slowlock_locked()
1774 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, in rt_mutex_slowlock() argument
1799 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1800 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q); in rt_mutex_slowlock()
1801 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in rt_mutex_slowlock()
1807 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, in __rt_mutex_lock() argument
1812 if (likely(rt_mutex_try_acquire(lock))) in __rt_mutex_lock()
1815 return rt_mutex_slowlock(lock, NULL, state); in __rt_mutex_lock()
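
__rt_mutex_lock(), __rt_mutex_trylock() and __rt_mutex_unlock() all share the same two-level shape: one cmpxchg in the uncontended case, the heavyweight wait_lock path only on contention. The skeleton, self-contained; the slow path here is a crude spin placeholder for the queue-and-sleep machinery sketched earlier:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct rtmb { _Atomic uintptr_t owner; };

static bool try_acquire(struct rtmb *m, uintptr_t me)
{
        uintptr_t expected = 0;

        return atomic_compare_exchange_strong_explicit(&m->owner, &expected, me,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

static void slowlock(struct rtmb *m, uintptr_t me)
{
        uintptr_t expected = 0;

        /* Placeholder: a real slow path queues the task and sleeps. */
        while (!atomic_compare_exchange_weak_explicit(&m->owner, &expected, me,
                                                      memory_order_acquire,
                                                      memory_order_relaxed))
                expected = 0;   /* CAS updated 'expected'; reset and retry */
}

/* One cmpxchg in the common case, the slow path only on contention. */
static void lock(struct rtmb *m, uintptr_t me)
{
        if (try_acquire(m, me))
                return;
        slowlock(m, me);
}

int main(void)
{
        static int me;
        struct rtmb m = { .owner = 0 };

        lock(&m, (uintptr_t)&me);
        return 0;
}
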
1829 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock, in rtlock_slowlock_locked() argument
1831 __releases(&lock->wait_lock) __acquires(&lock->wait_lock) in rtlock_slowlock_locked()
1836 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1839 if (try_to_take_rt_mutex(lock, current, NULL)) { in rtlock_slowlock_locked()
1849 trace_contention_begin(lock, LCB_F_RT); in rtlock_slowlock_locked()
1851 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q); in rtlock_slowlock_locked()
1855 if (try_to_take_rt_mutex(lock, current, &waiter)) { in rtlock_slowlock_locked()
1860 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1861 owner = rt_mutex_owner(lock); in rtlock_slowlock_locked()
1864 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in rtlock_slowlock_locked()
1866 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) { in rtlock_slowlock_locked()
1871 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1882 fixup_rt_mutex_waiters(lock, true); in rtlock_slowlock_locked()
1885 trace_contention_end(lock, 0); in rtlock_slowlock_locked()
1889 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) in rtlock_slowlock() argument
1894 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1895 rtlock_slowlock_locked(lock, &wake_q); in rtlock_slowlock()
1896 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in rtlock_slowlock()
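
rtlock_slowlock() is the PREEMPT_RT substitution path: spinlock_t and rwlock_t callers land here, in the same wait loop as above, but with no signal or timeout exits, since spinlock semantics require the acquire to eventually succeed unconditionally. The earlier condvar model, reduced to that abort-free form:

#include <pthread.h>
#include <stdbool.h>

struct rtlock {
        pthread_mutex_t wait_lock;
        pthread_cond_t wake;
        bool held;
};

/* Same take/sleep cycle as the mutex slow path, with no abort exits. */
static void rtlock_slowlock(struct rtlock *l)
{
        pthread_mutex_lock(&l->wait_lock);
        while (l->held)
                pthread_cond_wait(&l->wake, &l->wait_lock);     /* never aborts */
        l->held = true;
        pthread_mutex_unlock(&l->wait_lock);
}
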