xref: /linux-6.15/kernel/locking/rtmutex_api.c (revision bb630f9f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
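
/*
 * Usage sketch (illustrative only, not part of the kernel sources): a typical
 * caller of the exported API above. DEFINE_RT_MUTEX() comes from
 * <linux/rtmutex.h>; the data being protected and all names are made up for
 * the example.
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_update(void)
 *	{
 *		rt_mutex_lock(&example_lock);
 *		example_count++;
 *		rt_mutex_unlock(&example_lock);
 *	}
 *
 * With CONFIG_DEBUG_LOCK_ALLOC, rt_mutex_lock() is expected to map to
 * rt_mutex_lock_nested(lock, 0) so that lockdep sees the acquisition.
 */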

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
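
/*
 * Usage sketch (illustrative only, not part of the kernel sources): the
 * interruptible variant returns -EINTR when a pending signal aborts the wait,
 * which callers usually propagate. All names below are made up.
 *
 *	static int example_claim(struct rt_mutex *lock)
 *	{
 *		int ret = rt_mutex_lock_interruptible(lock);
 *
 *		if (ret)
 *			return ret;	// -EINTR: interrupted by a signal
 *
 *		// ... access the protected state ...
 *
 *		rt_mutex_unlock(lock);
 *		return 0;
 *	}
 */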

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
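
/*
 * Usage sketch (illustrative only, not part of the kernel sources):
 * rt_mutex_trylock() returns 1 on success and 0 on contention, so the unlock
 * must only happen on the success path. All names below are made up.
 *
 *	static bool example_try_update(struct rt_mutex *lock)
 *	{
 *		if (!rt_mutex_trylock(lock))
 *			return false;	// contended; caller retries later
 *
 *		// ... short critical section ...
 *
 *		rt_mutex_unlock(lock);
 *		return true;
 *	}
 */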

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant of the unlock path. Since the futex
 * variants do not use the fast-path, it can be simple and will not need to
 * retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
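
/*
 * Usage sketch (illustrative only, not part of the kernel sources):
 * __rt_mutex_init() is normally reached via the rt_mutex_init() wrapper or
 * DEFINE_RT_MUTEX() from <linux/rtmutex.h>, which supply the lockdep name
 * and class key. The structure and names below are made up.
 *
 *	struct example_dev {
 *		struct rt_mutex	io_lock;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		rt_mutex_init(&dev->io_lock);	// must start out unlocked
 *	}
 */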

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. Caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	__rt_mutex_base_init(lock);
	rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none. The hrtimer should already
 *			have been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - we did the cleanup and are done
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          the caller should disregard its return value
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
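
/*
 * Usage sketch (illustrative only, not part of the kernel sources): how the
 * proxy-lock calls above chain together. This is a condensed version of the
 * pattern used by the PI-futex code; the real callers in kernel/futex/ also
 * handle requeueing and pi_state bookkeeping.
 *
 *	// On behalf of @task (e.g. the requeue target):
 *	ret = rt_mutex_start_proxy_lock(lock, waiter, task);
 *	// ret == 1: lock acquired for @task, wake it up
 *	// ret == 0: @task is now blocked on @lock
 *
 *	// Later, in the context of @task itself:
 *	ret = rt_mutex_wait_proxy_lock(lock, timeout, waiter);
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter))
 *		ret = 0;	// ownership came through after the failed wait
 */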

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
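
/*
 * Usage sketch (illustrative only, not part of the kernel sources): on
 * PREEMPT_RT the regular struct mutex API above is backed by the rtmutex,
 * so callers stay unchanged. mutex_lock_io() additionally accounts the
 * blocking time as iowait via io_schedule_prepare()/io_schedule_finish().
 * All names below are made up.
 *
 *	static void example_submit_and_wait(struct mutex *io_mutex)
 *	{
 *		mutex_lock_io(io_mutex);	// counted as iowait
 *		// ... issue the IO and wait for completion ...
 *		mutex_unlock(io_mutex);
 *	}
 */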

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);

#endif /* CONFIG_PREEMPT_RT */
579