1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/locking/mutex.c
4  *
5  * Mutexes: blocking mutual exclusion locks
6  *
7  * Started by Ingo Molnar:
8  *
9  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
10  *
11  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12  * David Howells for suggestions and improvements.
13  *
14  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15  *    from the -rt tree, where it was originally implemented for rtmutexes
16  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17  *    and Sven Dietrich.
18  *
19  * Also see Documentation/locking/mutex-design.rst.
20  */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32 
33 #ifdef CONFIG_DEBUG_MUTEXES
34 # include "mutex-debug.h"
35 #else
36 # include "mutex.h"
37 #endif
38 
39 void
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
41 {
42 	atomic_long_set(&lock->owner, 0);
43 	spin_lock_init(&lock->wait_lock);
44 	INIT_LIST_HEAD(&lock->wait_list);
45 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
46 	osq_lock_init(&lock->osq);
47 #endif
48 
49 	debug_mutex_init(lock, name, key);
50 }
51 EXPORT_SYMBOL(__mutex_init);
52 
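/*
 * Illustrative usage sketch (not part of the original file): a mutex is
 * either defined statically with DEFINE_MUTEX() or initialized at run time
 * with mutex_init(), which wraps __mutex_init() with a per-site lockdep
 * class key. 'struct foo' and foo_setup() below are hypothetical.
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct foo {
 *		struct mutex lock;
 *		int value;
 *	};
 *
 *	static void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);
 *		f->value = 0;
 *	}
 *
 * Memsetting or copying an initialized mutex is not allowed; every mutex
 * must go through one of these initializers before first use.
 */
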
53 /*
54  * @owner: contains a 'struct task_struct *' pointer to the current lock owner;
55  * NULL means not owned. Since task_struct pointers are aligned to at
56  * least L1_CACHE_BYTES, we have low bits to store extra state.
57  *
58  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
59  * Bit1 indicates unlock needs to hand the lock to the top-waiter.
60  * Bit2 indicates handoff has been done and we're waiting for pickup.
61  */
62 #define MUTEX_FLAG_WAITERS	0x01
63 #define MUTEX_FLAG_HANDOFF	0x02
64 #define MUTEX_FLAG_PICKUP	0x04
65 
66 #define MUTEX_FLAGS		0x07
67 
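/*
 * For example (illustrative; the pointer value is made up): an owner word of
 * 0xffff888075301c03 decodes as task = 0xffff888075301c00 (low bits cleared
 * with ~MUTEX_FLAGS) plus WAITERS | HANDOFF, i.e. the lock is held, at least
 * one waiter is queued, and unlock must hand the lock to the top waiter
 * rather than simply clearing the owner field.
 */
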
68 /*
69  * Internal helper function; C doesn't allow us to hide it :/
70  *
71  * DO NOT USE (outside of mutex code).
72  */
73 static inline struct task_struct *__mutex_owner(struct mutex *lock)
74 {
75 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
76 }
77 
78 static inline struct task_struct *__owner_task(unsigned long owner)
79 {
80 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
81 }
82 
83 bool mutex_is_locked(struct mutex *lock)
84 {
85 	return __mutex_owner(lock) != NULL;
86 }
87 EXPORT_SYMBOL(mutex_is_locked);
88 
89 static inline unsigned long __owner_flags(unsigned long owner)
90 {
91 	return owner & MUTEX_FLAGS;
92 }
93 
94 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
95 {
96 	unsigned long owner, curr = (unsigned long)current;
97 
98 	owner = atomic_long_read(&lock->owner);
99 	for (;;) { /* must loop, can race against a flag */
100 		unsigned long flags = __owner_flags(owner);
101 		unsigned long task = owner & ~MUTEX_FLAGS;
102 
103 		if (task) {
104 			if (flags & MUTEX_FLAG_PICKUP) {
105 				if (task != curr)
106 					break;
107 				flags &= ~MUTEX_FLAG_PICKUP;
108 			} else if (handoff) {
109 				if (flags & MUTEX_FLAG_HANDOFF)
110 					break;
111 				flags |= MUTEX_FLAG_HANDOFF;
112 			} else {
113 				break;
114 			}
115 		} else {
116 #ifdef CONFIG_DEBUG_MUTEXES
117 			DEBUG_LOCKS_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
118 #endif
119 			task = curr;
120 		}
121 
122 		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
123 			if (task == curr)
124 				return NULL;
125 			break;
126 		}
127 	}
128 
129 	return __owner_task(owner);
130 }
131 
132 /*
133  * Trylock or set HANDOFF
134  */
135 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
136 {
137 	return !__mutex_trylock_common(lock, handoff);
138 }
139 
140 /*
141  * Actual trylock that will work on any unlocked state.
142  */
143 static inline bool __mutex_trylock(struct mutex *lock)
144 {
145 	return !__mutex_trylock_common(lock, false);
146 }
147 
148 #ifndef CONFIG_DEBUG_LOCK_ALLOC
149 /*
150  * Lockdep annotations are contained to the slow paths for simplicity.
151  * There is nothing that would stop spreading the lockdep annotations outwards
152  * except more code.
153  */
154 
155 /*
156  * Optimistic trylock that only works in the uncontended case. Make sure to
157  * follow with a __mutex_trylock() before failing.
158  */
159 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
160 {
161 	unsigned long curr = (unsigned long)current;
162 	unsigned long zero = 0UL;
163 
164 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
165 		return true;
166 
167 	return false;
168 }
169 
170 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
171 {
172 	unsigned long curr = (unsigned long)current;
173 
174 	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
175 }
176 #endif
177 
178 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
179 {
180 	atomic_long_or(flag, &lock->owner);
181 }
182 
183 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
184 {
185 	atomic_long_andnot(flag, &lock->owner);
186 }
187 
188 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
189 {
190 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
191 }
192 
193 /*
194  * Add @waiter to a given location in the lock wait_list and set the
195  * FLAG_WAITERS flag if it's the first waiter.
196  */
197 static void __sched
198 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
199 		   struct list_head *list)
200 {
201 	debug_mutex_add_waiter(lock, waiter, current);
202 
203 	list_add_tail(&waiter->list, list);
204 	if (__mutex_waiter_is_first(lock, waiter))
205 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
206 }
207 
208 /*
209  * Give up ownership to a specific task; when @task = NULL, this is equivalent
210  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
211  * WAITERS. Provides RELEASE semantics like a regular unlock;
212  * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
213  */
214 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
215 {
216 	unsigned long owner = atomic_long_read(&lock->owner);
217 
218 	for (;;) {
219 		unsigned long new;
220 
221 #ifdef CONFIG_DEBUG_MUTEXES
222 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
223 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
224 #endif
225 
226 		new = (owner & MUTEX_FLAG_WAITERS);
227 		new |= (unsigned long)task;
228 		if (task)
229 			new |= MUTEX_FLAG_PICKUP;
230 
231 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
232 			break;
233 	}
234 }
235 
236 #ifndef CONFIG_DEBUG_LOCK_ALLOC
237 /*
238  * We split the mutex lock/unlock logic into separate fastpath and
239  * slowpath functions, to reduce the register pressure on the fastpath.
240  * We also put the fastpath first in the kernel image, to make sure the
241  * branch is predicted by the CPU as default-untaken.
242  */
243 static void __sched __mutex_lock_slowpath(struct mutex *lock);
244 
245 /**
246  * mutex_lock - acquire the mutex
247  * @lock: the mutex to be acquired
248  *
249  * Lock the mutex exclusively for this task. If the mutex is not
250  * available right now, it will sleep until it can get it.
251  *
252  * The mutex must later on be released by the same task that
253  * acquired it. Recursive locking is not allowed. The task
254  * may not exit without first unlocking the mutex. Also, kernel
255  * memory where the mutex resides must not be freed with
256  * the mutex still locked. The mutex must first be initialized
257  * (or statically defined) before it can be locked. memset()-ing
258  * the mutex to 0 is not allowed.
259  *
260  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
261  * checks that will enforce the restrictions and will also do
262  * deadlock debugging)
263  *
264  * This function is similar to (but not equivalent to) down().
265  */
266 void __sched mutex_lock(struct mutex *lock)
267 {
268 	might_sleep();
269 
270 	if (!__mutex_trylock_fast(lock))
271 		__mutex_lock_slowpath(lock);
272 }
273 EXPORT_SYMBOL(mutex_lock);
274 #endif
275 
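/*
 * Illustrative usage sketch (not part of the original file): the canonical
 * lock/unlock pairing around a critical section; 'foo_lock' and 'foo_count'
 * are hypothetical.
 *
 *	static DEFINE_MUTEX(foo_lock);
 *	static int foo_count;
 *
 *	static void foo_inc(void)
 *	{
 *		mutex_lock(&foo_lock);
 *		foo_count++;
 *		mutex_unlock(&foo_lock);
 *	}
 *
 * mutex_lock() may sleep, so this is only valid in process context, and the
 * same task that took foo_lock must be the one to release it.
 */
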
276 /*
277  * Wait-Die:
278  *   A newer transaction is killed when:
279  *     it (the newer transaction) makes a request for a lock being held
280  *     by an older transaction.
281  *
282  * Wound-Wait:
283  *   A newer transaction is wounded when:
284  *     an older transaction makes a request for a lock being held by
285  *     the newer transaction.
286  */
287 
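/*
 * Worked example (illustrative): consider two transactions whose contexts
 * were created in this order, so transaction 1 is older (smaller stamp) than
 * transaction 2, and assume both already hold locks in their contexts.
 *
 *  Wait-Die:   if transaction 2 requests a lock held by transaction 1, its
 *              lock attempt fails with -EDEADLK; it must drop all its locks
 *              and retry, while transaction 1 keeps running.
 *
 *  Wound-Wait: if transaction 1 requests a lock held by transaction 2,
 *              transaction 2 is marked wounded; the next time it blocks on a
 *              contended ww_mutex it gets -EDEADLK and backs off, letting
 *              transaction 1 proceed.
 *
 * Either way the older transaction eventually wins, which is what prevents
 * deadlock between the two.
 */
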
288 /*
289  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
290  * it.
291  */
292 static __always_inline void
293 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
294 {
295 #ifdef CONFIG_DEBUG_MUTEXES
296 	/*
297 	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
298 	 * but released with a normal mutex_unlock in this call.
299 	 *
300 	 * This should never happen; always use ww_mutex_unlock.
301 	 */
302 	DEBUG_LOCKS_WARN_ON(ww->ctx);
303 
304 	/*
305 	 * Not quite done after calling ww_acquire_done()?
306 	 */
307 	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
308 
309 	if (ww_ctx->contending_lock) {
310 		/*
311 		 * After -EDEADLK you tried to
312 		 * acquire a different ww_mutex? Bad!
313 		 */
314 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
315 
316 		/*
317 		 * You called ww_mutex_lock after receiving -EDEADLK,
318 		 * but 'forgot' to unlock everything else first?
319 		 */
320 		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
321 		ww_ctx->contending_lock = NULL;
322 	}
323 
324 	/*
325 	 * Naughty: using a different class will lead to undefined behavior!
326 	 */
327 	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
328 #endif
329 	ww_ctx->acquired++;
330 	ww->ctx = ww_ctx;
331 }
332 
333 /*
334  * Determine if context @a is 'after' context @b. IOW, @a is a younger
335  * transaction than @b and, depending on the algorithm, either needs to wait for
336  * @b or die.
337  */
338 static inline bool __sched
339 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
340 {
341 
342 	return (signed long)(a->stamp - b->stamp) > 0;
343 }
344 
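/*
 * The signed subtraction makes this comparison safe across counter
 * wrap-around. For example (illustrative): a->stamp = 5 and b->stamp = 2
 * gives a positive difference, so @a is the younger context; the result
 * stays correct even after the stamp counter wraps, as long as the two
 * stamps were handed out fewer than LONG_MAX allocations apart.
 */
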
345 /*
346  * Wait-Die; wake a younger waiter context (when locks held) such that it can
347  * die.
348  *
349  * Among waiters with context, only the first one can have other locks acquired
350  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
351  * __ww_mutex_check_kill() wake any but the earliest context.
352  */
353 static bool __sched
354 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
355 	       struct ww_acquire_ctx *ww_ctx)
356 {
357 	if (!ww_ctx->is_wait_die)
358 		return false;
359 
360 	if (waiter->ww_ctx->acquired > 0 &&
361 			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
362 		debug_mutex_wake_waiter(lock, waiter);
363 		wake_up_process(waiter->task);
364 	}
365 
366 	return true;
367 }
368 
369 /*
370  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
371  *
372  * Wound the lock holder if there are waiters with older transactions than
373  * the lock holder's. Even though multiple waiters may wound the lock holder,
374  * it's sufficient that only one does.
375  */
376 static bool __ww_mutex_wound(struct mutex *lock,
377 			     struct ww_acquire_ctx *ww_ctx,
378 			     struct ww_acquire_ctx *hold_ctx)
379 {
380 	struct task_struct *owner = __mutex_owner(lock);
381 
382 	lockdep_assert_held(&lock->wait_lock);
383 
384 	/*
385 	 * Possible through __ww_mutex_add_waiter() when we race with
386 	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
387 	 * through __ww_mutex_check_waiters().
388 	 */
389 	if (!hold_ctx)
390 		return false;
391 
392 	/*
393 	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
394 	 * it cannot go away because we'll have FLAG_WAITERS set and hold
395 	 * wait_lock.
396 	 */
397 	if (!owner)
398 		return false;
399 
400 	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
401 		hold_ctx->wounded = 1;
402 
403 		/*
404 		 * wake_up_process() paired with set_current_state()
405 		 * inserts sufficient barriers to make sure @owner either sees
406 		 * it's wounded in __ww_mutex_check_kill() or has a
407 		 * wakeup pending to re-read the wounded state.
408 		 */
409 		if (owner != current)
410 			wake_up_process(owner);
411 
412 		return true;
413 	}
414 
415 	return false;
416 }
417 
418 /*
419  * We just acquired @lock under @ww_ctx; if there are later contexts waiting
420  * behind us on the wait-list, check if they need to die, or wound us.
421  *
422  * See __ww_mutex_add_waiter() for the list-order construction; basically the
423  * list is ordered by stamp, smallest (oldest) first.
424  *
425  * This relies on never mixing wait-die/wound-wait on the same wait-list,
426  * which is currently ensured by that being a ww_class property.
427  *
428  * The current task must not be on the wait list.
429  */
430 static void __sched
431 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
432 {
433 	struct mutex_waiter *cur;
434 
435 	lockdep_assert_held(&lock->wait_lock);
436 
437 	list_for_each_entry(cur, &lock->wait_list, list) {
438 		if (!cur->ww_ctx)
439 			continue;
440 
441 		if (__ww_mutex_die(lock, cur, ww_ctx) ||
442 		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
443 			break;
444 	}
445 }
446 
447 /*
448  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
449  * and wake up any waiters so they can recheck.
450  */
451 static __always_inline void
452 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
453 {
454 	ww_mutex_lock_acquired(lock, ctx);
455 
456 	/*
457 	 * The lock->ctx update should be visible on all cores before
458 	 * the WAITERS check is done, otherwise contended waiters might be
459 	 * missed. The contended waiters will either see ww_ctx == NULL
460 	 * and keep spinning, or they will acquire wait_lock, add themselves
461 	 * to the waiter list and sleep.
462 	 */
463 	smp_mb(); /* See comments above and below. */
464 
465 	/*
466 	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
467 	 *     MB		        MB
468 	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
469 	 *
470 	 * The memory barrier above pairs with the memory barrier in
471 	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
472 	 * and/or !empty list.
473 	 */
474 	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
475 		return;
476 
477 	/*
478 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
479 	 * die or wound us.
480 	 */
481 	spin_lock(&lock->base.wait_lock);
482 	__ww_mutex_check_waiters(&lock->base, ctx);
483 	spin_unlock(&lock->base.wait_lock);
484 }
485 
486 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
487 
488 /*
489  * Trylock variant that returns the owning task on failure.
490  */
491 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
492 {
493 	return __mutex_trylock_common(lock, false);
494 }
495 
496 static inline
497 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
498 			    struct mutex_waiter *waiter)
499 {
500 	struct ww_mutex *ww;
501 
502 	ww = container_of(lock, struct ww_mutex, base);
503 
504 	/*
505 	 * If ww->ctx is set, its contents are undefined; only
506 	 * by acquiring wait_lock is there a guarantee that
507 	 * they are valid when read.
508 	 *
509 	 * As such, when deadlock detection needs to be
510 	 * performed, optimistic spinning cannot be done.
511 	 *
512 	 * Check this in every inner iteration because we may
513 	 * be racing against another thread's ww_mutex_lock.
514 	 */
515 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
516 		return false;
517 
518 	/*
519 	 * If we aren't on the wait list yet, cancel the spin
520 	 * if there are waiters. We want to avoid stealing the
521 	 * lock from a waiter with an earlier stamp, since the
522 	 * other thread may already own a lock that we also
523 	 * need.
524 	 */
525 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
526 		return false;
527 
528 	/*
529 	 * Similarly, stop spinning if we are no longer the
530 	 * first waiter.
531 	 */
532 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
533 		return false;
534 
535 	return true;
536 }
537 
538 /*
539  * Look out! "owner" is an entirely speculative pointer access and not
540  * reliable.
541  *
542  * "noinline" so that this function shows up on perf profiles.
543  */
544 static noinline
545 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
546 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
547 {
548 	bool ret = true;
549 
550 	rcu_read_lock();
551 	while (__mutex_owner(lock) == owner) {
552 		/*
553 		 * Ensure we emit the owner->on_cpu dereference _after_
554 		 * checking lock->owner still matches owner. If that fails,
555 		 * owner might point to freed memory. If it still matches,
556 		 * the rcu_read_lock() ensures the memory stays valid.
557 		 */
558 		barrier();
559 
560 		/*
561 		 * Use vcpu_is_preempted() to detect lock holder preemption.
562 		 */
563 		if (!owner->on_cpu || need_resched() ||
564 				vcpu_is_preempted(task_cpu(owner))) {
565 			ret = false;
566 			break;
567 		}
568 
569 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
570 			ret = false;
571 			break;
572 		}
573 
574 		cpu_relax();
575 	}
576 	rcu_read_unlock();
577 
578 	return ret;
579 }
580 
581 /*
582  * Initial check for entering the mutex spinning loop
583  */
584 static inline int mutex_can_spin_on_owner(struct mutex *lock)
585 {
586 	struct task_struct *owner;
587 	int retval = 1;
588 
589 	if (need_resched())
590 		return 0;
591 
592 	rcu_read_lock();
593 	owner = __mutex_owner(lock);
594 
595 	/*
596 	 * Due to lock holder preemption, we skip spinning if the task is not
597 	 * on a CPU or its CPU is preempted.
598 	 */
599 	if (owner)
600 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
601 	rcu_read_unlock();
602 
603 	/*
604 	 * If lock->owner is not set, the mutex has been released. Return true
605 	 * such that we'll trylock in the spin path, which is a faster option
606 	 * than the blocking slow path.
607 	 */
608 	return retval;
609 }
610 
611 /*
612  * Optimistic spinning.
613  *
614  * We try to spin for acquisition when we find that the lock owner
615  * is currently running on a (different) CPU and while we don't
616  * need to reschedule. The rationale is that if the lock owner is
617  * running, it is likely to release the lock soon.
618  *
619  * The mutex spinners are queued up using MCS lock so that only one
620  * spinner can compete for the mutex. However, if mutex spinning isn't
621  * going to happen, there is no point in going through the lock/unlock
622  * overhead.
623  *
624  * Returns true when the lock was taken, otherwise false, indicating
625  * that we need to jump to the slowpath and sleep.
626  *
627  * The waiter flag is set to true if the spinner is a waiter in the wait
628  * queue. The waiter-spinner will spin on the lock directly and concurrently
629  * with the spinner at the head of the OSQ, if present, until the owner is
630  * changed to itself.
631  */
632 static __always_inline bool
633 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
634 		      struct mutex_waiter *waiter)
635 {
636 	if (!waiter) {
637 		/*
638 		 * The purpose of the mutex_can_spin_on_owner() function is
639 		 * to eliminate the overhead of osq_lock() and osq_unlock()
640 		 * in case spinning isn't possible. As a waiter-spinner
641 		 * is not going to take OSQ lock anyway, there is no need
642 		 * to call mutex_can_spin_on_owner().
643 		 */
644 		if (!mutex_can_spin_on_owner(lock))
645 			goto fail;
646 
647 		/*
648 		 * In order to avoid a stampede of mutex spinners trying to
649 		 * acquire the mutex all at once, the spinners need to take a
650 		 * MCS (queued) lock first before spinning on the owner field.
651 		 */
652 		if (!osq_lock(&lock->osq))
653 			goto fail;
654 	}
655 
656 	for (;;) {
657 		struct task_struct *owner;
658 
659 		/* Try to acquire the mutex... */
660 		owner = __mutex_trylock_or_owner(lock);
661 		if (!owner)
662 			break;
663 
664 		/*
665 		 * There's an owner, wait for it to either
666 		 * release the lock or go to sleep.
667 		 */
668 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
669 			goto fail_unlock;
670 
671 		/*
672 		 * The cpu_relax() call is a compiler barrier which forces
673 		 * everything in this loop to be re-loaded. We don't need
674 		 * memory barriers as we'll eventually observe the right
675 		 * values at the cost of a few extra spins.
676 		 */
677 		cpu_relax();
678 	}
679 
680 	if (!waiter)
681 		osq_unlock(&lock->osq);
682 
683 	return true;
684 
685 
686 fail_unlock:
687 	if (!waiter)
688 		osq_unlock(&lock->osq);
689 
690 fail:
691 	/*
692 	 * If we fell out of the spin path because of need_resched(),
693 	 * reschedule now, before we try-lock the mutex. This avoids getting
694 	 * scheduled out right after we obtained the mutex.
695 	 */
696 	if (need_resched()) {
697 		/*
698 		 * We _should_ have TASK_RUNNING here, but just in case
699 		 * we do not, make it so, otherwise we might get stuck.
700 		 */
701 		__set_current_state(TASK_RUNNING);
702 		schedule_preempt_disabled();
703 	}
704 
705 	return false;
706 }
707 #else
708 static __always_inline bool
709 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
710 		      struct mutex_waiter *waiter)
711 {
712 	return false;
713 }
714 #endif
715 
716 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
717 
718 /**
719  * mutex_unlock - release the mutex
720  * @lock: the mutex to be released
721  *
722  * Unlock a mutex that has been locked by this task previously.
723  *
724  * This function must not be used in interrupt context. Unlocking
725  * a mutex that is not locked is not allowed.
726  *
727  * This function is similar to (but not equivalent to) up().
728  */
729 void __sched mutex_unlock(struct mutex *lock)
730 {
731 #ifndef CONFIG_DEBUG_LOCK_ALLOC
732 	if (__mutex_unlock_fast(lock))
733 		return;
734 #endif
735 	__mutex_unlock_slowpath(lock, _RET_IP_);
736 }
737 EXPORT_SYMBOL(mutex_unlock);
738 
739 /**
740  * ww_mutex_unlock - release the w/w mutex
741  * @lock: the mutex to be released
742  *
743  * Unlock a mutex that has been locked by this task previously with any of the
744  * ww_mutex_lock* functions (with or without an acquire context). It is
745  * forbidden to release the locks after releasing the acquire context.
746  *
747  * This function must not be used in interrupt context. Unlocking
748  * an unlocked mutex is not allowed.
749  */
750 void __sched ww_mutex_unlock(struct ww_mutex *lock)
751 {
752 	/*
753 	 * The unlocking fastpath is the 0->1 transition from 'locked'
754 	 * into 'unlocked' state:
755 	 */
756 	if (lock->ctx) {
757 #ifdef CONFIG_DEBUG_MUTEXES
758 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
759 #endif
760 		if (lock->ctx->acquired > 0)
761 			lock->ctx->acquired--;
762 		lock->ctx = NULL;
763 	}
764 
765 	mutex_unlock(&lock->base);
766 }
767 EXPORT_SYMBOL(ww_mutex_unlock);
768 
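/*
 * Illustrative usage sketch (not part of the original file): the usual
 * acquire/backoff pattern from Documentation/locking/ww-mutex-design.rst for
 * taking two ww_mutexes that other threads may take in the opposite order.
 * 'foo_ww_class', 'struct foo' and foo_lock_pair() are hypothetical, and
 * each foo's lock is assumed to have been set up with
 * ww_mutex_init(&f->lock, &foo_ww_class).
 *
 *	static DEFINE_WW_CLASS(foo_ww_class);
 *
 *	struct foo {
 *		struct ww_mutex lock;
 *	};
 *
 *	static int foo_lock_pair(struct foo *a, struct foo *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &foo_ww_class);
 *
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *		if (ret)
 *			goto out;
 *
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *		while (ret == -EDEADLK) {
 *			ww_mutex_unlock(&a->lock);
 *			ww_mutex_lock_slow(&b->lock, &ctx);
 *			swap(a, b);
 *			ret = ww_mutex_lock(&b->lock, &ctx);
 *		}
 *		if (ret) {
 *			ww_mutex_unlock(&a->lock);
 *			goto out;
 *		}
 *
 *		ww_acquire_done(&ctx);
 *		... critical section with both locks held ...
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_unlock(&b->lock);
 *	out:
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 *
 * On -EDEADLK the context was killed/wounded: it drops the lock it holds,
 * sleeps on the contended one with ww_mutex_lock_slow(), and retries, so the
 * older context always makes progress.
 */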
769 
770 static __always_inline int __sched
771 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
772 {
773 	if (ww_ctx->acquired > 0) {
774 #ifdef CONFIG_DEBUG_MUTEXES
775 		struct ww_mutex *ww;
776 
777 		ww = container_of(lock, struct ww_mutex, base);
778 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
779 		ww_ctx->contending_lock = ww;
780 #endif
781 		return -EDEADLK;
782 	}
783 
784 	return 0;
785 }
786 
787 
788 /*
789  * Check the wound condition for the current lock acquire.
790  *
791  * Wound-Wait: If we're wounded, kill ourselves.
792  *
793  * Wait-Die: If we're trying to acquire a lock already held by an older
794  *           context, kill ourselves.
795  *
796  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
797  * look at waiters before us in the wait-list.
798  */
799 static inline int __sched
800 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
801 		      struct ww_acquire_ctx *ctx)
802 {
803 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
804 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
805 	struct mutex_waiter *cur;
806 
807 	if (ctx->acquired == 0)
808 		return 0;
809 
810 	if (!ctx->is_wait_die) {
811 		if (ctx->wounded)
812 			return __ww_mutex_kill(lock, ctx);
813 
814 		return 0;
815 	}
816 
817 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
818 		return __ww_mutex_kill(lock, ctx);
819 
820 	/*
821 	 * If there is a waiter in front of us that has a context, then its
822 	 * stamp is earlier than ours and we must kill ourselves.
823 	 */
824 	cur = waiter;
825 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
826 		if (!cur->ww_ctx)
827 			continue;
828 
829 		return __ww_mutex_kill(lock, ctx);
830 	}
831 
832 	return 0;
833 }
834 
835 /*
836  * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
837  * smallest first, such that older contexts are preferred over younger
838  * contexts when acquiring the lock.
839  *
840  * Waiters without context are interspersed in FIFO order.
841  *
842  * Furthermore, for Wait-Die, kill ourselves immediately when possible (there
843  * are older contexts already waiting) to avoid unnecessary waiting; for
844  * Wound-Wait, ensure we wound the owning context when it is younger.
845  */
846 static inline int __sched
847 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
848 		      struct mutex *lock,
849 		      struct ww_acquire_ctx *ww_ctx)
850 {
851 	struct mutex_waiter *cur;
852 	struct list_head *pos;
853 	bool is_wait_die;
854 
855 	if (!ww_ctx) {
856 		__mutex_add_waiter(lock, waiter, &lock->wait_list);
857 		return 0;
858 	}
859 
860 	is_wait_die = ww_ctx->is_wait_die;
861 
862 	/*
863 	 * Add the waiter before the first waiter with a higher stamp.
864 	 * Waiters without a context are skipped to avoid starving
865 	 * them. Wait-Die waiters may die here. Wound-Wait waiters
866 	 * never die here, but they are sorted in stamp order and
867 	 * may wound the lock holder.
868 	 */
869 	pos = &lock->wait_list;
870 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
871 		if (!cur->ww_ctx)
872 			continue;
873 
874 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
875 			/*
876 			 * Wait-Die: if we find an older context waiting, there
877 			 * is no point in queueing behind it, as we'd have to
878 			 * die the moment it would acquire the lock.
879 			 */
880 			if (is_wait_die) {
881 				int ret = __ww_mutex_kill(lock, ww_ctx);
882 
883 				if (ret)
884 					return ret;
885 			}
886 
887 			break;
888 		}
889 
890 		pos = &cur->list;
891 
892 		/* Wait-Die: ensure younger waiters die. */
893 		__ww_mutex_die(lock, cur, ww_ctx);
894 	}
895 
896 	__mutex_add_waiter(lock, waiter, pos);
897 
898 	/*
899 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
900 	 * wound it such that we might proceed.
901 	 */
902 	if (!is_wait_die) {
903 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
904 
905 		/*
906 		 * See ww_mutex_set_context_fastpath(). Orders setting
907 		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
908 		 * such that either we or the fastpath will wound @ww->ctx.
909 		 */
910 		smp_mb();
911 		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
912 	}
913 
914 	return 0;
915 }
916 
917 /*
918  * Lock a mutex (possibly interruptible), slowpath:
919  */
920 static __always_inline int __sched
921 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
922 		    struct lockdep_map *nest_lock, unsigned long ip,
923 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
924 {
925 	struct mutex_waiter waiter;
926 	struct ww_mutex *ww;
927 	int ret;
928 
929 	if (!use_ww_ctx)
930 		ww_ctx = NULL;
931 
932 	might_sleep();
933 
934 #ifdef CONFIG_DEBUG_MUTEXES
935 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
936 #endif
937 
938 	ww = container_of(lock, struct ww_mutex, base);
939 	if (ww_ctx) {
940 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
941 			return -EALREADY;
942 
943 		/*
944 		 * Reset the wounded flag after a kill. No other process can
945 		 * race and wound us here since they can't have a valid owner
946 		 * pointer if we don't have any locks held.
947 		 */
948 		if (ww_ctx->acquired == 0)
949 			ww_ctx->wounded = 0;
950 	}
951 
952 	preempt_disable();
953 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
954 
955 	if (__mutex_trylock(lock) ||
956 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
957 		/* got the lock, yay! */
958 		lock_acquired(&lock->dep_map, ip);
959 		if (ww_ctx)
960 			ww_mutex_set_context_fastpath(ww, ww_ctx);
961 		preempt_enable();
962 		return 0;
963 	}
964 
965 	spin_lock(&lock->wait_lock);
966 	/*
967 	 * After waiting to acquire the wait_lock, try again.
968 	 */
969 	if (__mutex_trylock(lock)) {
970 		if (ww_ctx)
971 			__ww_mutex_check_waiters(lock, ww_ctx);
972 
973 		goto skip_wait;
974 	}
975 
976 	debug_mutex_lock_common(lock, &waiter);
977 
978 	lock_contended(&lock->dep_map, ip);
979 
980 	if (!use_ww_ctx) {
981 		/* add waiting tasks to the end of the waitqueue (FIFO): */
982 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
983 
984 
985 #ifdef CONFIG_DEBUG_MUTEXES
986 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
987 #endif
988 	} else {
989 		/*
990 		 * Add in stamp order, waking up waiters that must kill
991 		 * themselves.
992 		 */
993 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
994 		if (ret)
995 			goto err_early_kill;
996 
997 		waiter.ww_ctx = ww_ctx;
998 	}
999 
1000 	waiter.task = current;
1001 
1002 	set_current_state(state);
1003 	for (;;) {
1004 		bool first;
1005 
1006 		/*
1007 		 * Once we hold wait_lock, we're serialized against
1008 		 * mutex_unlock() handing the lock off to us; do a trylock
1009 		 * before testing the error conditions to make sure we pick up
1010 		 * the handoff.
1011 		 */
1012 		if (__mutex_trylock(lock))
1013 			goto acquired;
1014 
1015 		/*
1016 		 * Check for signals and kill conditions while holding
1017 		 * wait_lock. This ensures the lock cancellation is ordered
1018 		 * against mutex_unlock() and wake-ups do not go missing.
1019 		 */
1020 		if (signal_pending_state(state, current)) {
1021 			ret = -EINTR;
1022 			goto err;
1023 		}
1024 
1025 		if (ww_ctx) {
1026 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1027 			if (ret)
1028 				goto err;
1029 		}
1030 
1031 		spin_unlock(&lock->wait_lock);
1032 		schedule_preempt_disabled();
1033 
1034 		first = __mutex_waiter_is_first(lock, &waiter);
1035 
1036 		set_current_state(state);
1037 		/*
1038 		 * Here we order against unlock; we must either see it change
1039 		 * state back to RUNNING and fall through the next schedule(),
1040 		 * or we must see its unlock and acquire.
1041 		 */
1042 		if (__mutex_trylock_or_handoff(lock, first) ||
1043 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
1044 			break;
1045 
1046 		spin_lock(&lock->wait_lock);
1047 	}
1048 	spin_lock(&lock->wait_lock);
1049 acquired:
1050 	__set_current_state(TASK_RUNNING);
1051 
1052 	if (ww_ctx) {
1053 		/*
1054 		 * Wound-Wait; we stole the lock (!first_waiter), check the
1055 		 * waiters as anyone might want to wound us.
1056 		 */
1057 		if (!ww_ctx->is_wait_die &&
1058 		    !__mutex_waiter_is_first(lock, &waiter))
1059 			__ww_mutex_check_waiters(lock, ww_ctx);
1060 	}
1061 
1062 	mutex_remove_waiter(lock, &waiter, current);
1063 	if (likely(list_empty(&lock->wait_list)))
1064 		__mutex_clear_flag(lock, MUTEX_FLAGS);
1065 
1066 	debug_mutex_free_waiter(&waiter);
1067 
1068 skip_wait:
1069 	/* got the lock - cleanup and rejoice! */
1070 	lock_acquired(&lock->dep_map, ip);
1071 
1072 	if (ww_ctx)
1073 		ww_mutex_lock_acquired(ww, ww_ctx);
1074 
1075 	spin_unlock(&lock->wait_lock);
1076 	preempt_enable();
1077 	return 0;
1078 
1079 err:
1080 	__set_current_state(TASK_RUNNING);
1081 	mutex_remove_waiter(lock, &waiter, current);
1082 err_early_kill:
1083 	spin_unlock(&lock->wait_lock);
1084 	debug_mutex_free_waiter(&waiter);
1085 	mutex_release(&lock->dep_map, ip);
1086 	preempt_enable();
1087 	return ret;
1088 }
1089 
1090 static int __sched
1091 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1092 	     struct lockdep_map *nest_lock, unsigned long ip)
1093 {
1094 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1095 }
1096 
1097 static int __sched
1098 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1099 		struct lockdep_map *nest_lock, unsigned long ip,
1100 		struct ww_acquire_ctx *ww_ctx)
1101 {
1102 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1103 }
1104 
1105 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1106 void __sched
1107 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1108 {
1109 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1110 }
1111 
1112 EXPORT_SYMBOL_GPL(mutex_lock_nested);
1113 
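/*
 * Illustrative sketch (not part of the original file): mutex_lock_nested()
 * tells lockdep that taking two mutexes of the same lock class in a fixed
 * order is intentional, e.g. a parent and a child object of the same type
 * ('parent' and 'child' below are hypothetical):
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 *
 * Without the subclass annotation lockdep would report the second
 * acquisition as a possible recursive deadlock, since both mutexes share a
 * lock class.
 */
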
1114 void __sched
1115 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1116 {
1117 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1118 }
1119 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1120 
1121 int __sched
1122 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1123 {
1124 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1125 }
1126 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1127 
1128 int __sched
1129 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1130 {
1131 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1132 }
1133 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1134 
1135 void __sched
1136 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1137 {
1138 	int token;
1139 
1140 	might_sleep();
1141 
1142 	token = io_schedule_prepare();
1143 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1144 			    subclass, NULL, _RET_IP_, NULL, 0);
1145 	io_schedule_finish(token);
1146 }
1147 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1148 
1149 static inline int
1150 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1151 {
1152 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1153 	unsigned tmp;
1154 
1155 	if (ctx->deadlock_inject_countdown-- == 0) {
1156 		tmp = ctx->deadlock_inject_interval;
1157 		if (tmp > UINT_MAX/4)
1158 			tmp = UINT_MAX;
1159 		else
1160 			tmp = tmp*2 + tmp + tmp/2;
1161 
1162 		ctx->deadlock_inject_interval = tmp;
1163 		ctx->deadlock_inject_countdown = tmp;
1164 		ctx->contending_lock = lock;
1165 
1166 		ww_mutex_unlock(lock);
1167 
1168 		return -EDEADLK;
1169 	}
1170 #endif
1171 
1172 	return 0;
1173 }
1174 
1175 int __sched
1176 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1177 {
1178 	int ret;
1179 
1180 	might_sleep();
1181 	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1182 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1183 			       ctx);
1184 	if (!ret && ctx && ctx->acquired > 1)
1185 		return ww_mutex_deadlock_injection(lock, ctx);
1186 
1187 	return ret;
1188 }
1189 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1190 
1191 int __sched
1192 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1193 {
1194 	int ret;
1195 
1196 	might_sleep();
1197 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1198 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1199 			      ctx);
1200 
1201 	if (!ret && ctx && ctx->acquired > 1)
1202 		return ww_mutex_deadlock_injection(lock, ctx);
1203 
1204 	return ret;
1205 }
1206 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1207 
1208 #endif
1209 
1210 /*
1211  * Release the lock, slowpath:
1212  */
1213 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1214 {
1215 	struct task_struct *next = NULL;
1216 	DEFINE_WAKE_Q(wake_q);
1217 	unsigned long owner;
1218 
1219 	mutex_release(&lock->dep_map, ip);
1220 
1221 	/*
1222 	 * Release the lock before (potentially) taking the spinlock such that
1223 	 * other contenders can get on with things ASAP.
1224 	 *
1225 	 * Except when HANDOFF; in that case we must not clear the owner field,
1226 	 * but instead set it to the top waiter.
1227 	 */
1228 	owner = atomic_long_read(&lock->owner);
1229 	for (;;) {
1230 #ifdef CONFIG_DEBUG_MUTEXES
1231 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1232 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1233 #endif
1234 
1235 		if (owner & MUTEX_FLAG_HANDOFF)
1236 			break;
1237 
1238 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
1239 			if (owner & MUTEX_FLAG_WAITERS)
1240 				break;
1241 
1242 			return;
1243 		}
1244 	}
1245 
1246 	spin_lock(&lock->wait_lock);
1247 	debug_mutex_unlock(lock);
1248 	if (!list_empty(&lock->wait_list)) {
1249 		/* get the first entry from the wait-list: */
1250 		struct mutex_waiter *waiter =
1251 			list_first_entry(&lock->wait_list,
1252 					 struct mutex_waiter, list);
1253 
1254 		next = waiter->task;
1255 
1256 		debug_mutex_wake_waiter(lock, waiter);
1257 		wake_q_add(&wake_q, next);
1258 	}
1259 
1260 	if (owner & MUTEX_FLAG_HANDOFF)
1261 		__mutex_handoff(lock, next);
1262 
1263 	spin_unlock(&lock->wait_lock);
1264 
1265 	wake_up_q(&wake_q);
1266 }
1267 
1268 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1269 /*
1270  * Here come the less common (and hence less performance-critical) APIs:
1271  * mutex_lock_interruptible() and mutex_trylock().
1272  */
1273 static noinline int __sched
1274 __mutex_lock_killable_slowpath(struct mutex *lock);
1275 
1276 static noinline int __sched
1277 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1278 
1279 /**
1280  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1281  * @lock: The mutex to be acquired.
1282  *
1283  * Lock the mutex like mutex_lock().  If a signal is delivered while the
1284  * process is sleeping, this function will return without acquiring the
1285  * mutex.
1286  *
1287  * Context: Process context.
1288  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1289  * signal arrived.
1290  */
1291 int __sched mutex_lock_interruptible(struct mutex *lock)
1292 {
1293 	might_sleep();
1294 
1295 	if (__mutex_trylock_fast(lock))
1296 		return 0;
1297 
1298 	return __mutex_lock_interruptible_slowpath(lock);
1299 }
1300 
1301 EXPORT_SYMBOL(mutex_lock_interruptible);
1302 
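/*
 * Illustrative sketch (not part of the original file): callers commonly turn
 * the -EINTR into -ERESTARTSYS so an interrupted system call can be
 * restarted ('dev' and its lock are hypothetical):
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */
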
1303 /**
1304  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1305  * @lock: The mutex to be acquired.
1306  *
1307  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
1308  * the current process is delivered while the process is sleeping, this
1309  * function will return without acquiring the mutex.
1310  *
1311  * Context: Process context.
1312  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1313  * fatal signal arrived.
1314  */
1315 int __sched mutex_lock_killable(struct mutex *lock)
1316 {
1317 	might_sleep();
1318 
1319 	if (__mutex_trylock_fast(lock))
1320 		return 0;
1321 
1322 	return __mutex_lock_killable_slowpath(lock);
1323 }
1324 EXPORT_SYMBOL(mutex_lock_killable);
1325 
1326 /**
1327  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1328  * @lock: The mutex to be acquired.
1329  *
1330  * Lock the mutex like mutex_lock().  While the task is waiting for this
1331  * mutex, it will be accounted as being in the IO wait state by the
1332  * scheduler.
1333  *
1334  * Context: Process context.
1335  */
1336 void __sched mutex_lock_io(struct mutex *lock)
1337 {
1338 	int token;
1339 
1340 	token = io_schedule_prepare();
1341 	mutex_lock(lock);
1342 	io_schedule_finish(token);
1343 }
1344 EXPORT_SYMBOL_GPL(mutex_lock_io);
1345 
1346 static noinline void __sched
1347 __mutex_lock_slowpath(struct mutex *lock)
1348 {
1349 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1350 }
1351 
1352 static noinline int __sched
1353 __mutex_lock_killable_slowpath(struct mutex *lock)
1354 {
1355 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1356 }
1357 
1358 static noinline int __sched
1359 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1360 {
1361 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1362 }
1363 
1364 static noinline int __sched
1365 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1366 {
1367 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1368 			       _RET_IP_, ctx);
1369 }
1370 
1371 static noinline int __sched
1372 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1373 					    struct ww_acquire_ctx *ctx)
1374 {
1375 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1376 			       _RET_IP_, ctx);
1377 }
1378 
1379 #endif
1380 
1381 /**
1382  * mutex_trylock - try to acquire the mutex, without waiting
1383  * @lock: the mutex to be acquired
1384  *
1385  * Try to acquire the mutex atomically. Returns 1 if the mutex
1386  * has been acquired successfully, and 0 on contention.
1387  *
1388  * NOTE: this function follows the spin_trylock() convention, so
1389  * it is negated from the down_trylock() return values! Be careful
1390  * about this when converting semaphore users to mutexes.
1391  *
1392  * This function must not be used in interrupt context. The
1393  * mutex must be released by the same task that acquired it.
1394  */
1395 int __sched mutex_trylock(struct mutex *lock)
1396 {
1397 	bool locked;
1398 
1399 #ifdef CONFIG_DEBUG_MUTEXES
1400 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1401 #endif
1402 
1403 	locked = __mutex_trylock(lock);
1404 	if (locked)
1405 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1406 
1407 	return locked;
1408 }
1409 EXPORT_SYMBOL(mutex_trylock);
1410 
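/*
 * Illustrative sketch (not part of the original file): per the spin_trylock()
 * convention, a non-zero return means the lock is now held and must be
 * released; 'stats_lock', update_stats() and defer_stats_update() are
 * hypothetical:
 *
 *	if (mutex_trylock(&stats_lock)) {
 *		update_stats();
 *		mutex_unlock(&stats_lock);
 *	} else {
 *		defer_stats_update();
 *	}
 */
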
1411 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1412 int __sched
1413 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1414 {
1415 	might_sleep();
1416 
1417 	if (__mutex_trylock_fast(&lock->base)) {
1418 		if (ctx)
1419 			ww_mutex_set_context_fastpath(lock, ctx);
1420 		return 0;
1421 	}
1422 
1423 	return __ww_mutex_lock_slowpath(lock, ctx);
1424 }
1425 EXPORT_SYMBOL(ww_mutex_lock);
1426 
1427 int __sched
1428 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1429 {
1430 	might_sleep();
1431 
1432 	if (__mutex_trylock_fast(&lock->base)) {
1433 		if (ctx)
1434 			ww_mutex_set_context_fastpath(lock, ctx);
1435 		return 0;
1436 	}
1437 
1438 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1439 }
1440 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1441 
1442 #endif
1443 
1444 /**
1445  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1446  * @cnt: the atomic counter to decrement
1447  * @lock: the mutex to return holding if we decrement to 0
1448  *
1449  * Return true and hold the lock if we decrement to 0; return false otherwise.
1450  */
1451 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1452 {
1453 	/* dec if we can't possibly hit 0 */
1454 	if (atomic_add_unless(cnt, -1, 1))
1455 		return 0;
1456 	/* we might hit 0, so take the lock */
1457 	mutex_lock(lock);
1458 	if (!atomic_dec_and_test(cnt)) {
1459 		/* when we actually did the dec, we didn't hit 0 */
1460 		mutex_unlock(lock);
1461 		return 0;
1462 	}
1463 	/* we hit 0, and we hold the lock */
1464 	return 1;
1465 }
1466 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
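
/*
 * Illustrative sketch (not part of the original file): the typical use is a
 * reference count whose final put must remove the object from a lookup
 * structure under a mutex; 'obj', 'obj_list_lock' and obj_destroy() are
 * hypothetical, and obj->refcount is an atomic_t.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		obj_destroy(obj);
 *	}
 *
 * When the count does not drop to zero, the function returns 0 without ever
 * taking the mutex, keeping the common path cheap.
 */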
1467