/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>

/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree_entry:		pi node to enqueue into the mutex waiters tree
 * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 */
struct rt_mutex_waiter {
	struct rb_node		tree_entry;
	struct rb_node		pi_tree_entry;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	int			prio;
	u64			deadline;
};
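
/*
 * Illustrative sketch only (not part of the API): a waiter is a stack
 * object that lives for the duration of the blocking slow path. The
 * function name below is hypothetical; the real slow path is in
 * kernel/locking/rtmutex.c.
 *
 *	static void example_block_on(struct rt_mutex_base *lock)
 *	{
 *		struct rt_mutex_waiter waiter;
 *
 *		rt_mutex_init_waiter(&waiter);
 *		... enqueue the waiter, schedule() until granted the lock ...
 *		debug_rt_mutex_free_waiter(&waiter);
 *	}
 */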

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
				 struct rt_mutex_waiter *waiter);
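
/*
 * Rough usage sketch of the proxy-lock hooks, based on how the futex
 * requeue_pi code drives them (simplified; the real callers live in the
 * futex code and hold the relevant locks):
 *
 *	requeueing side:	rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	woken waiter:		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *	on error/timeout:	if (!rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *					ret = 0;	the lock was acquired after all
 *
 * __rt_mutex_start_proxy_lock() is the variant for callers that already
 * hold lock->wait_lock.
 */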

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				struct wake_q_head *wake_q);

extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
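
/*
 * Hedged sketch of the deferred-wakeup pattern behind the two calls above
 * (roughly what rt_mutex_futex_unlock() does in the rtmutex code): the
 * waiter to wake is collected under wait_lock and only woken once the raw
 * spinlock has been dropped.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	unsigned long flags;
 *	bool postunlock;
 *
 *	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 *	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 *
 *	if (postunlock)
 *		rt_mutex_postunlock(&wake_q);
 */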

/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree_entry);
}
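
/*
 * Note: task_top_pi_waiter() computes the container of
 * p->pi_waiters.rb_leftmost without a NULL check, so the result is only
 * meaningful when task_has_pi_waiters() is true. A minimal, hypothetical
 * caller would therefore look like:
 *
 *	if (task_has_pi_waiters(p))
 *		top = task_top_pi_waiter(p);
 */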

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
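
/*
 * The "has waiters" state is kept in bit 0 of lock->owner, which is free
 * because task_struct pointers are at least word aligned. A hedged worked
 * example with made-up addresses:
 *
 *	lock->owner == NULL			lock is free
 *	lock->owner == 0xffff888012345600	owned, no waiters
 *	lock->owner == 0xffff888012345601	owned, waiters enqueued
 *
 * rt_mutex_owner() masks the bit off and returns only the task pointer.
 */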

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}
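
/*
 * Minimal initialization sketch, assuming a caller that embeds the base
 * lock directly (the higher level init helpers layer lockdep setup on
 * top of this):
 *
 *	struct rt_mutex_base lock;
 *
 *	__rt_mutex_base_init(&lock);
 *	lock.owner is NULL, the waiter tree is empty, wait_lock is unlocked
 */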

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

/* Poison a waiter before first use so that unset fields stand out */
static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

/* Poison a waiter on release so that use after free stands out */
static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
	RB_CLEAR_NODE(&waiter->tree_entry);
	waiter->task = NULL;
}
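
/*
 * Ordering note with a small, hedged illustration (the dequeue helpers
 * live in rtmutex.c): the debug poison is applied first, then the fields
 * that matter are explicitly (re)initialized. Clearing the rb nodes lets
 * later code detect a waiter that is not enqueued on any tree, e.g.:
 *
 *	if (RB_EMPTY_NODE(&waiter->tree_entry))
 *		return;		not on the lock's waiter tree
 */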

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif  /* !CONFIG_RT_MUTEXES */

#endif