xref: /linux-6.15/kernel/locking/rtmutex_api.c (revision 531ae4b0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	ret = __rt_mutex_lock(lock, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
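
/*
 * Illustrative usage sketch, not part of the original file: a caller taking
 * two rt_mutexes of the same lock class in a fixed order, using the lockdep
 * subclass to annotate the nesting. The function and both parameters are
 * hypothetical; rt_mutex_lock_nested() falls back to rt_mutex_lock() via
 * <linux/rtmutex.h> when CONFIG_DEBUG_LOCK_ALLOC is disabled, and
 * SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>.
 */
static void __maybe_unused rt_mutex_nested_lock_sketch(struct rt_mutex *outer,
							struct rt_mutex *inner)
{
	rt_mutex_lock(outer);					/* subclass 0 */
	rt_mutex_lock_nested(inner, SINGLE_DEPTH_NESTING);	/* subclass 1 */

	/* ... critical section protected by both locks ... */

	rt_mutex_unlock(inner);
	rt_mutex_unlock(outer);
}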

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
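
/*
 * Illustrative usage sketch, not part of the original file: taking the lock
 * interruptibly and propagating -EINTR to the caller. The function name is
 * hypothetical.
 */
static int __maybe_unused rt_mutex_interruptible_sketch(struct rt_mutex *lock)
{
	int ret;

	ret = rt_mutex_lock_interruptible(lock);
	if (ret)
		return ret;	/* -EINTR: signal arrived, lock is not held */

	/* ... critical section ... */

	rt_mutex_unlock(lock);
	return 0;
}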

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(lock);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
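
/*
 * Illustrative usage sketch, not part of the original file: opportunistic
 * locking from a context that must not sleep (but is not hard or soft
 * interrupt context), per the comment above. The function name is
 * hypothetical.
 */
static bool __maybe_unused rt_mutex_trylock_sketch(struct rt_mutex *lock)
{
	if (!rt_mutex_trylock(lock))
		return false;	/* contended, caller backs off or retries */

	/* ... short critical section ... */

	rt_mutex_unlock(lock);
	return true;
}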

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(lock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant of unlock. Since the futex
 * variants do not use the fast-path, it can be simple and will not need
 * to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wake_q:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
				     struct wake_q_head *wake_q)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);

	__rt_mutex_basic_init(lock);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
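
/*
 * Illustrative usage sketch, not part of the original file: the two common
 * ways to obtain an initialized rt_mutex. DEFINE_RT_MUTEX() and
 * rt_mutex_init() come from <linux/rtmutex.h> and both end up in
 * __rt_mutex_init() above; the identifiers used here are hypothetical.
 */
static DEFINE_RT_MUTEX(example_static_lock);

static void __maybe_unused rt_mutex_init_sketch(struct rt_mutex *dynamic_lock)
{
	/* must only be called on an unlocked, not yet used rt_mutex */
	rt_mutex_init(dynamic_lock);

	rt_mutex_lock(&example_static_lock);
	/* ... */
	rt_mutex_unlock(&example_static_lock);
}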

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. The caller has to provide serialization itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock,
					struct task_struct *proxy_owner)
{
	__rt_mutex_basic_init(lock);
	rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. The caller has to provide serialization itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}
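
/*
 * Illustrative usage sketch, not part of the original file: the expected
 * pairing of the two helpers above around the lifetime of a container object
 * that is not yet (or no longer) visible to other tasks, as the PI-futex
 * pi_state code does. The function and both parameters are hypothetical.
 */
static void __maybe_unused proxy_owner_lifetime_sketch(struct rt_mutex *lock,
						       struct task_struct *owner)
{
	/* container of @lock is still private to us, no serialization needed */
	rt_mutex_init_proxy_locked(lock, owner);

	/* ... @lock is handed around via the proxy-lock API below ... */

	/* container is about to be freed and is unreachable again */
	rt_mutex_proxy_unlock(lock);
}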

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none. The hrtimer should already
 *			have been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
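
/*
 * Illustrative usage sketch, not part of the original file: the three-step
 * proxy-lock protocol as the PI-futex code applies it. In reality the start
 * half and the wait/cleanup half run in different task contexts; they are
 * collapsed into one hypothetical function here. rt_mutex_init_waiter() is
 * assumed to be available from rtmutex_common.h, and the NULL timeout means
 * "no timeout".
 */
static int __maybe_unused proxy_lock_protocol_sketch(struct rt_mutex *lock,
						     struct task_struct *task)
{
	struct rt_mutex_waiter waiter;
	int ret;

	rt_mutex_init_waiter(&waiter);

	/* Step 1: enqueue @task as a waiter, with full deadlock detection. */
	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
	if (ret > 0)
		return 0;	/* lock acquired for @task, wake it up */
	if (ret < 0)
		return ret;	/* e.g. -EDEADLK, waiter already removed */

	/* Step 2 (in @task's context): block until the lock is acquired. */
	ret = rt_mutex_wait_proxy_lock(lock, NULL, &waiter);

	/* Step 3: on failure, clean up unless we became owner after all. */
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
		ret = 0;

	return ret;
}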

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);

	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
	preempt_enable();
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif
454