1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21696a8beSPeter Zijlstra /*
31696a8beSPeter Zijlstra  * RT Mutexes: blocking mutual exclusion locks with PI support
41696a8beSPeter Zijlstra  *
51696a8beSPeter Zijlstra  * started by Ingo Molnar and Thomas Gleixner:
61696a8beSPeter Zijlstra  *
71696a8beSPeter Zijlstra  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
81696a8beSPeter Zijlstra  *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
91696a8beSPeter Zijlstra  *
101696a8beSPeter Zijlstra  * This file contains the private data structure and API definitions.
111696a8beSPeter Zijlstra  */
121696a8beSPeter Zijlstra 
131696a8beSPeter Zijlstra #ifndef __KERNEL_RTMUTEX_COMMON_H
141696a8beSPeter Zijlstra #define __KERNEL_RTMUTEX_COMMON_H
151696a8beSPeter Zijlstra 
16f41dcc18SThomas Gleixner #include <linux/debug_locks.h>
171696a8beSPeter Zijlstra #include <linux/rtmutex.h>
1884f001e1SIngo Molnar #include <linux/sched/wake_q.h>
191696a8beSPeter Zijlstra 
20f7853c34SPeter Zijlstra 
21f7853c34SPeter Zijlstra /*
22f7853c34SPeter Zijlstra  * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
23f7853c34SPeter Zijlstra  * separate trees and they need their own copy of the sort keys because of
24f7853c34SPeter Zijlstra  * different locking requirements.
25f7853c34SPeter Zijlstra  *
26f7853c34SPeter Zijlstra  * @entry:		rbtree node to enqueue into the waiters tree
27f7853c34SPeter Zijlstra  * @prio:		Priority of the waiter
28f7853c34SPeter Zijlstra  * @deadline:		Deadline of the waiter if applicable
29f7853c34SPeter Zijlstra  *
30f7853c34SPeter Zijlstra  * See rt_waiter_node_less() and waiter_*_prio().
31f7853c34SPeter Zijlstra  */
struct rt_waiter_node {
	struct rb_node	entry;		/* rbtree linkage into one of the two waiter trees */
	int		prio;		/* cached priority sort key */
	u64		deadline;	/* cached deadline sort key (deadline tasks) */
};
37f7853c34SPeter Zijlstra 
381696a8beSPeter Zijlstra /*
391696a8beSPeter Zijlstra  * This is the control structure for tasks blocked on a rt_mutex,
401696a8beSPeter Zijlstra  * which is allocated on the kernel stack on of the blocked task.
411696a8beSPeter Zijlstra  *
42f7853c34SPeter Zijlstra  * @tree:		node to enqueue into the mutex waiters tree
43f7853c34SPeter Zijlstra  * @pi_tree:		node to enqueue into the mutex owner waiters tree
441696a8beSPeter Zijlstra  * @task:		task reference to the blocked task
4537350e3bSThomas Gleixner  * @lock:		Pointer to the rt_mutex on which the waiter blocks
46c014ef69SThomas Gleixner  * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
47add46132SPeter Zijlstra  * @ww_ctx:		WW context pointer
48f7853c34SPeter Zijlstra  *
49f7853c34SPeter Zijlstra  * @tree is ordered by @lock->wait_lock
50f7853c34SPeter Zijlstra  * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
511696a8beSPeter Zijlstra  */
struct rt_mutex_waiter {
	struct rt_waiter_node	tree;		/* node in @lock->waiters, under @lock->wait_lock */
	struct rt_waiter_node	pi_tree;	/* node in owner's pi_waiters, under owner's pi_lock */
	struct task_struct	*task;		/* the blocked task */
	struct rt_mutex_base	*lock;		/* the lock this waiter blocks on */
	unsigned int		wake_state;	/* TASK_NORMAL or TASK_RTLOCK_WAIT */
	struct ww_acquire_ctx	*ww_ctx;	/* ww_mutex acquire context, if any */
};
601696a8beSPeter Zijlstra 
61b576e640SThomas Gleixner /**
62*b3c5ec8bSRandy Dunlap  * struct rt_wake_q_head - Wrapper around regular wake_q_head to support
63b576e640SThomas Gleixner  *			   "sleeping" spinlocks on RT
64b576e640SThomas Gleixner  * @head:		The regular wake_q_head for sleeping lock variants
65456cfbc6SThomas Gleixner  * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
66b576e640SThomas Gleixner  */
struct rt_wake_q_head {
	struct wake_q_head	head;		/* regular wake queue for sleeping lock wakeups */
	struct task_struct	*rtlock_task;	/* single task to wake for RT spin/rwlock variants */
};
71b576e640SThomas Gleixner 
/* Define and initialize a struct rt_wake_q_head named @name */
#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}
77b576e640SThomas Gleixner 
781696a8beSPeter Zijlstra /*
79531ae4b0SThomas Gleixner  * PI-futex support (proxy locking functions, etc.):
80531ae4b0SThomas Gleixner  */
/* Proxy locking: set up / tear down lock ownership on behalf of another task */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task,
				     struct wake_q_head *);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
				 struct rt_mutex_waiter *waiter);

/* Trylock variants used by the futex code */
extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

/* Unlock paths; wakeups are collected in @wqh and issued by rt_mutex_postunlock() */
extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
105531ae4b0SThomas Gleixner 
106531ae4b0SThomas Gleixner /*
10737350e3bSThomas Gleixner  * Must be guarded because this header is included from rcu/tree_plugin.h
10837350e3bSThomas Gleixner  * unconditionally.
1091696a8beSPeter Zijlstra  */
110bc2eecd7SNicolas Pitre #ifdef CONFIG_RT_MUTEXES
rt_mutex_has_waiters(struct rt_mutex_base * lock)111830e6accSPeter Zijlstra static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
1121696a8beSPeter Zijlstra {
113a23ba907SDavidlohr Bueso 	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
1141696a8beSPeter Zijlstra }
1151696a8beSPeter Zijlstra 
116c3123c43SThomas Gleixner /*
117c3123c43SThomas Gleixner  * Lockless speculative check whether @waiter is still the top waiter on
118c3123c43SThomas Gleixner  * @lock. This is solely comparing pointers and not derefencing the
119c3123c43SThomas Gleixner  * leftmost entry which might be about to vanish.
120c3123c43SThomas Gleixner  */
rt_mutex_waiter_is_top_waiter(struct rt_mutex_base * lock,struct rt_mutex_waiter * waiter)121c3123c43SThomas Gleixner static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
122c3123c43SThomas Gleixner 						 struct rt_mutex_waiter *waiter)
123c3123c43SThomas Gleixner {
124c3123c43SThomas Gleixner 	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
125c3123c43SThomas Gleixner 
126f7853c34SPeter Zijlstra 	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
127c3123c43SThomas Gleixner }
128c3123c43SThomas Gleixner 
rt_mutex_top_waiter(struct rt_mutex_base * lock)129830e6accSPeter Zijlstra static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
1301696a8beSPeter Zijlstra {
131c28d62cfSPeter Zijlstra 	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
132c28d62cfSPeter Zijlstra 	struct rt_mutex_waiter *w = NULL;
1331696a8beSPeter Zijlstra 
134f7853c34SPeter Zijlstra 	lockdep_assert_held(&lock->wait_lock);
135f7853c34SPeter Zijlstra 
136c28d62cfSPeter Zijlstra 	if (leftmost) {
137f7853c34SPeter Zijlstra 		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
1381696a8beSPeter Zijlstra 		BUG_ON(w->lock != lock);
139c28d62cfSPeter Zijlstra 	}
1401696a8beSPeter Zijlstra 	return w;
1411696a8beSPeter Zijlstra }
1421696a8beSPeter Zijlstra 
task_has_pi_waiters(struct task_struct * p)1431696a8beSPeter Zijlstra static inline int task_has_pi_waiters(struct task_struct *p)
1441696a8beSPeter Zijlstra {
145a23ba907SDavidlohr Bueso 	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
1461696a8beSPeter Zijlstra }
1471696a8beSPeter Zijlstra 
task_top_pi_waiter(struct task_struct * p)14837350e3bSThomas Gleixner static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
1491696a8beSPeter Zijlstra {
150f7853c34SPeter Zijlstra 	lockdep_assert_held(&p->pi_lock);
151f7853c34SPeter Zijlstra 
15237350e3bSThomas Gleixner 	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
153f7853c34SPeter Zijlstra 			pi_tree.entry);
1541696a8beSPeter Zijlstra }
1551696a8beSPeter Zijlstra 
/* Bit 0 of ->owner: set when waiters are enqueued; masked off by rt_mutex_owner() */
#define RT_MUTEX_HAS_WAITERS	1UL
1571696a8beSPeter Zijlstra 
rt_mutex_owner(struct rt_mutex_base * lock)158830e6accSPeter Zijlstra static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
1591696a8beSPeter Zijlstra {
1601be5d4faSThomas Gleixner 	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
1611be5d4faSThomas Gleixner 
162b5016e82SThomas Gleixner 	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
1631696a8beSPeter Zijlstra }
1641696a8beSPeter Zijlstra 
1651696a8beSPeter Zijlstra /*
1668930ed80SThomas Gleixner  * Constants for rt mutex functions which have a selectable deadlock
1678930ed80SThomas Gleixner  * detection.
1688930ed80SThomas Gleixner  *
1698930ed80SThomas Gleixner  * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
1708930ed80SThomas Gleixner  *				no further PI adjustments to be made.
1718930ed80SThomas Gleixner  *
1728930ed80SThomas Gleixner  * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
1738930ed80SThomas Gleixner  *				walk of the lock chain.
1748930ed80SThomas Gleixner  */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,		/* Stop when no further PI adjustment is needed */
	RT_MUTEX_FULL_CHAINWALK,	/* Walk the full chain for deadlock detection */
};
1798930ed80SThomas Gleixner 
__rt_mutex_base_init(struct rt_mutex_base * lock)180830e6accSPeter Zijlstra static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
181f5a98866SThomas Gleixner {
182f5a98866SThomas Gleixner 	raw_spin_lock_init(&lock->wait_lock);
183f5a98866SThomas Gleixner 	lock->waiters = RB_ROOT_CACHED;
184830e6accSPeter Zijlstra 	lock->owner = NULL;
185f5a98866SThomas Gleixner }
186f5a98866SThomas Gleixner 
187f41dcc18SThomas Gleixner /* Debug functions */
debug_rt_mutex_unlock(struct rt_mutex_base * lock)188830e6accSPeter Zijlstra static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
189f41dcc18SThomas Gleixner {
190f41dcc18SThomas Gleixner 	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
191f41dcc18SThomas Gleixner 		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
192f41dcc18SThomas Gleixner }
193f41dcc18SThomas Gleixner 
debug_rt_mutex_proxy_unlock(struct rt_mutex_base * lock)194830e6accSPeter Zijlstra static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
195f41dcc18SThomas Gleixner {
196f41dcc18SThomas Gleixner 	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
197f41dcc18SThomas Gleixner 		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
198f41dcc18SThomas Gleixner }
199f41dcc18SThomas Gleixner 
debug_rt_mutex_init_waiter(struct rt_mutex_waiter * waiter)200f41dcc18SThomas Gleixner static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
201f41dcc18SThomas Gleixner {
202f41dcc18SThomas Gleixner 	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
203f41dcc18SThomas Gleixner 		memset(waiter, 0x11, sizeof(*waiter));
204f41dcc18SThomas Gleixner }
205f41dcc18SThomas Gleixner 
debug_rt_mutex_free_waiter(struct rt_mutex_waiter * waiter)206f41dcc18SThomas Gleixner static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
207f41dcc18SThomas Gleixner {
208f41dcc18SThomas Gleixner 	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
209f41dcc18SThomas Gleixner 		memset(waiter, 0x22, sizeof(*waiter));
210f41dcc18SThomas Gleixner }
2111696a8beSPeter Zijlstra 
rt_mutex_init_waiter(struct rt_mutex_waiter * waiter)212531ae4b0SThomas Gleixner static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
213531ae4b0SThomas Gleixner {
214531ae4b0SThomas Gleixner 	debug_rt_mutex_init_waiter(waiter);
215f7853c34SPeter Zijlstra 	RB_CLEAR_NODE(&waiter->pi_tree.entry);
216f7853c34SPeter Zijlstra 	RB_CLEAR_NODE(&waiter->tree.entry);
217c014ef69SThomas Gleixner 	waiter->wake_state = TASK_NORMAL;
218531ae4b0SThomas Gleixner 	waiter->task = NULL;
219531ae4b0SThomas Gleixner }
220531ae4b0SThomas Gleixner 
/*
 * Initialize a waiter for an RT-"spinning" lock (spinlock/rwlock on RT):
 * same as a regular waiter, but woken with TASK_RTLOCK_WAIT state.
 */
static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}
226c014ef69SThomas Gleixner 
227531ae4b0SThomas Gleixner #else /* CONFIG_RT_MUTEXES */
228531ae4b0SThomas Gleixner /* Used in rcu/tree_plugin.h */
/* !CONFIG_RT_MUTEXES stub: rtmutexes have no owner tracking in this config. */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
233531ae4b0SThomas Gleixner #endif  /* !CONFIG_RT_MUTEXES */
234531ae4b0SThomas Gleixner 
2351696a8beSPeter Zijlstra #endif
236