// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain
 * (runtime tunable via the kernel.max_lock_depth sysctl):
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	/* Drop the lockdep acquisition again if the lock was not taken */
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
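
/*
 * Usage sketch (hypothetical caller, not part of this file): @subclass
 * tells lockdep that taking a second lock of the same lock class is
 * intentional, e.g. when locking a parent and a child object.
 * SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>; the structures are
 * invented for illustration.
 *
 *	rt_mutex_lock(&parent->lock);
 *	rt_mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	rt_mutex_unlock(&child->lock);
 *	rt_mutex_unlock(&parent->lock);
 */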

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
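
/*
 * Usage sketch (hypothetical caller, names invented for illustration):
 * the plain lock/unlock pair around some shared state. DEFINE_RT_MUTEX()
 * is the static initializer from <linux/rtmutex.h>.
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		rt_mutex_lock(&example_lock);
 *		example_count++;
 *		rt_mutex_unlock(&example_lock);
 *	}
 */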

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
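
/*
 * Usage sketch, continuing the hypothetical example_lock above: the
 * interruptible variant has to be paired with a return-value check,
 * since a pending signal aborts the wait and the lock is then not held.
 *
 *	ret = rt_mutex_lock_interruptible(&example_lock);
 *	if (ret)
 *		return ret;
 *	example_count++;
 *	rt_mutex_unlock(&example_lock);
 */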

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
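
/*
 * Usage sketch, again with the hypothetical example_lock: note the
 * inverted return convention compared to the lock functions above,
 * 1 means the lock was taken.
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		example_count++;
 *		rt_mutex_unlock(&example_lock);
 *	}
 */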

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant which, since futex variants
 * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wake_q:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct wake_q_head *wake_q)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
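
/*
 * Usage sketch (hypothetical caller): embedded, dynamically allocated
 * locks go through the rt_mutex_init() wrapper in <linux/rtmutex.h>,
 * which supplies the name and a static lock class key for lockdep and
 * then calls __rt_mutex_init():
 *
 *	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (obj)
 *		rt_mutex_init(&obj->lock);
 */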

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	__rt_mutex_base_init(lock);
	rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none. The hrtimer should
 *			already have been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          the caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
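
/*
 * Call-pattern sketch for the proxy-lock API above, loosely modeled on
 * the PI-futex requeue path (the locking details and error handling of
 * the real futex code are omitted):
 *
 *	Requeueing task, with @lock->wait_lock held:
 *		ret = __rt_mutex_start_proxy_lock(lock, &waiter, task);
 *		(ret == 1 means the lock was acquired for @task, wake it)
 *
 *	Woken waiter, later:
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	(became owner after all)
 */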

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);

	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
	preempt_enable();
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif
459