xref: /linux-6.15/kernel/locking/mutex.c (revision aaa77de1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/locking/mutex.c
4  *
5  * Mutexes: blocking mutual exclusion locks
6  *
7  * Started by Ingo Molnar:
8  *
9  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
10  *
11  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12  * David Howells for suggestions and improvements.
13  *
14  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15  *    from the -rt tree, where it was originally implemented for rtmutexes
16  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17  *    and Sven Dietrich.
18  *
19  * Also see Documentation/locking/mutex-design.rst.
20  */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32 
33 #include "mutex.h"
34 
35 #ifdef CONFIG_DEBUG_MUTEXES
36 # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
37 #else
38 # define MUTEX_WARN_ON(cond)
39 #endif
40 
41 void
42 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
43 {
44 	atomic_long_set(&lock->owner, 0);
45 	raw_spin_lock_init(&lock->wait_lock);
46 	INIT_LIST_HEAD(&lock->wait_list);
47 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
48 	osq_lock_init(&lock->osq);
49 #endif
50 
51 	debug_mutex_init(lock, name, key);
52 }
53 EXPORT_SYMBOL(__mutex_init);
54 
55 /*
56  * @owner: contains a 'struct task_struct *' pointing to the current lock
57  * owner; NULL means not owned. Since task_struct pointers are aligned to
58  * at least L1_CACHE_BYTES, we have low bits to store extra state.
59  *
60  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
61  * Bit1 indicates unlock needs to hand the lock to the top-waiter
62  * Bit2 indicates handoff has been done and we're waiting for pickup.
63  */
64 #define MUTEX_FLAG_WAITERS	0x01
65 #define MUTEX_FLAG_HANDOFF	0x02
66 #define MUTEX_FLAG_PICKUP	0x04
67 
68 #define MUTEX_FLAGS		0x07
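
/*
 * Illustrative example (not part of the original code; the pointer value is
 * made up): a locked mutex with queued waiters and a pending handoff could
 * have an owner word of
 *
 *	(unsigned long)task | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	  = 0xffff888012345600 | 0x01 | 0x02 = 0xffff888012345603
 *
 * __owner_task() masks with ~MUTEX_FLAGS to recover the task pointer, and
 * __owner_flags() masks with MUTEX_FLAGS to recover the flag bits.
 */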
69 
70 /*
71  * Internal helper function; C doesn't allow us to hide it :/
72  *
73  * DO NOT USE (outside of mutex code).
74  */
75 static inline struct task_struct *__mutex_owner(struct mutex *lock)
76 {
77 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
78 }
79 
80 static inline struct task_struct *__owner_task(unsigned long owner)
81 {
82 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
83 }
84 
85 bool mutex_is_locked(struct mutex *lock)
86 {
87 	return __mutex_owner(lock) != NULL;
88 }
89 EXPORT_SYMBOL(mutex_is_locked);
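
/*
 * Illustrative use (not from this file; 'my_lock' is a hypothetical mutex):
 * mutex_is_locked() is mostly useful for sanity checks, e.g.
 *
 *	WARN_ON(!mutex_is_locked(&my_lock));
 *
 * in a helper that must only be called with the lock held. Note that it only
 * reports that some task owns the mutex, not that the current task does.
 */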
90 
91 static inline unsigned long __owner_flags(unsigned long owner)
92 {
93 	return owner & MUTEX_FLAGS;
94 }
95 
96 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
97 {
98 	unsigned long owner, curr = (unsigned long)current;
99 
100 	owner = atomic_long_read(&lock->owner);
101 	for (;;) { /* must loop, can race against a flag */
102 		unsigned long flags = __owner_flags(owner);
103 		unsigned long task = owner & ~MUTEX_FLAGS;
104 
105 		if (task) {
106 			if (flags & MUTEX_FLAG_PICKUP) {
107 				if (task != curr)
108 					break;
109 				flags &= ~MUTEX_FLAG_PICKUP;
110 			} else if (handoff) {
111 				if (flags & MUTEX_FLAG_HANDOFF)
112 					break;
113 				flags |= MUTEX_FLAG_HANDOFF;
114 			} else {
115 				break;
116 			}
117 		} else {
118 			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
119 			task = curr;
120 		}
121 
122 		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
123 			if (task == curr)
124 				return NULL;
125 			break;
126 		}
127 	}
128 
129 	return __owner_task(owner);
130 }
131 
132 /*
133  * Trylock or set HANDOFF
134  */
135 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
136 {
137 	return !__mutex_trylock_common(lock, handoff);
138 }
139 
140 /*
141  * Actual trylock that will work on any unlocked state.
142  */
143 static inline bool __mutex_trylock(struct mutex *lock)
144 {
145 	return !__mutex_trylock_common(lock, false);
146 }
147 
148 #ifndef CONFIG_DEBUG_LOCK_ALLOC
149 /*
150  * Lockdep annotations are confined to the slow paths for simplicity.
151  * There is nothing that would stop spreading the lockdep annotations outwards
152  * except more code.
153  */
154 
155 /*
156  * Optimistic trylock that only works in the uncontended case. Make sure to
157  * follow with a __mutex_trylock() before failing.
158  */
159 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
160 {
161 	unsigned long curr = (unsigned long)current;
162 	unsigned long zero = 0UL;
163 
164 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
165 		return true;
166 
167 	return false;
168 }
169 
170 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
171 {
172 	unsigned long curr = (unsigned long)current;
173 
174 	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
175 }
176 #endif
177 
178 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
179 {
180 	atomic_long_or(flag, &lock->owner);
181 }
182 
183 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
184 {
185 	atomic_long_andnot(flag, &lock->owner);
186 }
187 
188 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
189 {
190 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
191 }
192 
193 /*
194  * Add @waiter to a given location in the lock wait_list and set the
195  * FLAG_WAITERS flag if it's the first waiter.
196  */
197 static void
198 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
199 		   struct list_head *list)
200 {
201 	debug_mutex_add_waiter(lock, waiter, current);
202 
203 	list_add_tail(&waiter->list, list);
204 	if (__mutex_waiter_is_first(lock, waiter))
205 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
206 }
207 
208 static void
209 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
210 {
211 	list_del(&waiter->list);
212 	if (likely(list_empty(&lock->wait_list)))
213 		__mutex_clear_flag(lock, MUTEX_FLAGS);
214 
215 	debug_mutex_remove_waiter(lock, waiter, current);
216 }
217 
218 /*
219  * Give up ownership to a specific task; when @task = NULL, this is equivalent
220  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves
221  * WAITERS. Provides RELEASE semantics like a regular unlock, and the
222  * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
223  */
224 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
225 {
226 	unsigned long owner = atomic_long_read(&lock->owner);
227 
228 	for (;;) {
229 		unsigned long new;
230 
231 		MUTEX_WARN_ON(__owner_task(owner) != current);
232 		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
233 
234 		new = (owner & MUTEX_FLAG_WAITERS);
235 		new |= (unsigned long)task;
236 		if (task)
237 			new |= MUTEX_FLAG_PICKUP;
238 
239 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
240 			break;
241 	}
242 }
243 
244 #ifndef CONFIG_DEBUG_LOCK_ALLOC
245 /*
246  * We split the mutex lock/unlock logic into separate fastpath and
247  * slowpath functions, to reduce the register pressure on the fastpath.
248  * We also put the fastpath first in the kernel image, to make sure the
249  * branch is predicted by the CPU as default-untaken.
250  */
251 static void __sched __mutex_lock_slowpath(struct mutex *lock);
252 
253 /**
254  * mutex_lock - acquire the mutex
255  * @lock: the mutex to be acquired
256  *
257  * Lock the mutex exclusively for this task. If the mutex is not
258  * available right now, it will sleep until it can get it.
259  *
260  * The mutex must later on be released by the same task that
261  * acquired it. Recursive locking is not allowed. The task
262  * may not exit without first unlocking the mutex. Also, kernel
263  * memory where the mutex resides must not be freed with
264  * the mutex still locked. The mutex must first be initialized
265  * (or statically defined) before it can be locked. memset()-ing
266  * the mutex to 0 is not allowed.
267  *
268  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
269  * checks that will enforce the restrictions and will also do
270  * deadlock debugging)
271  *
272  * This function is similar to (but not equivalent to) down().
273  */
274 void __sched mutex_lock(struct mutex *lock)
275 {
276 	might_sleep();
277 
278 	if (!__mutex_trylock_fast(lock))
279 		__mutex_lock_slowpath(lock);
280 }
281 EXPORT_SYMBOL(mutex_lock);
282 #endif
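
/*
 * Usage sketch (illustrative only, not part of this file; 'my_lock' and
 * 'shared_count' are hypothetical names):
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static int shared_count;
 *
 *	static void bump_count(void)
 *	{
 *		mutex_lock(&my_lock);
 *		shared_count++;
 *		mutex_unlock(&my_lock);
 *	}
 *
 * The same task that called mutex_lock() must call mutex_unlock(), and the
 * mutex must not be held across the task's exit.
 */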
283 
284 /*
285  * Wait-Die:
286  *   A newer transaction is killed when:
287  *     it (the new transaction) makes a request for a lock being held
288  *     by an older transaction.
289  *
290  * Wound-Wait:
291  *   A newer transaction is wounded when:
292  *     an older transaction makes a request for a lock being held by
293  *     the newer transaction.
294  */
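
/*
 * Concrete illustration of the two rules above (an editorial restatement, not
 * from the original comment): let transaction A have an older (smaller) stamp
 * than transaction B.
 *
 *   Wait-Die:   B requests a lock held by A -> B is killed (it backs off,
 *               releases its locks and retries); A requesting a lock held
 *               by B simply waits.
 *
 *   Wound-Wait: A requests a lock held by B -> B is wounded and must drop
 *               the lock once it notices; B requesting a lock held by A
 *               simply waits.
 */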
295 
296 /*
297  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
298  * it.
299  */
300 static __always_inline void
301 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
302 {
303 #ifdef CONFIG_DEBUG_MUTEXES
304 	/*
305 	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire this
306 	 * mutex, but then released it with a plain mutex_unlock.
307 	 *
308 	 * This should never happen; always use ww_mutex_unlock.
309 	 */
310 	DEBUG_LOCKS_WARN_ON(ww->ctx);
311 
312 	/*
313 	 * Still acquiring locks after calling ww_acquire_done()?
314 	 */
315 	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
316 
317 	if (ww_ctx->contending_lock) {
318 		/*
319 		 * After -EDEADLK you tried to
320 		 * acquire a different ww_mutex? Bad!
321 		 */
322 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
323 
324 		/*
325 		 * You called ww_mutex_lock after receiving -EDEADLK,
326 		 * but 'forgot' to unlock everything else first?
327 		 */
328 		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
329 		ww_ctx->contending_lock = NULL;
330 	}
331 
332 	/*
333 	 * Naughty, using a different class will lead to undefined behavior!
334 	 */
335 	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
336 #endif
337 	ww_ctx->acquired++;
338 	ww->ctx = ww_ctx;
339 }
340 
341 /*
342  * Determine if context @a is 'after' context @b. IOW, @a is a younger
343  * transaction than @b and depending on algorithm either needs to wait for
344  * @b or die.
345  */
346 static inline bool __sched
347 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
348 {
349 
350 	return (signed long)(a->stamp - b->stamp) > 0;
351 }
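
/*
 * The signed subtraction above makes the comparison robust against stamp
 * wrap-around (the same idiom as time_after()). Illustrative example using
 * 8-bit arithmetic for brevity: if b->stamp == 255 and a->stamp has wrapped
 * to 1, then (signed)(a->stamp - b->stamp) == 2 > 0, so @a is still seen as
 * the younger (later) context even though its numeric value is smaller.
 */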
352 
353 /*
354  * Wait-Die; wake a younger waiter context (when it holds locks) so that it
355  * can die.
356  *
357  * Among waiters with context, only the first one can have other locks acquired
358  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
359  * __ww_mutex_check_kill() wake any but the earliest context.
360  */
361 static bool __sched
362 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
363 	       struct ww_acquire_ctx *ww_ctx)
364 {
365 	if (!ww_ctx->is_wait_die)
366 		return false;
367 
368 	if (waiter->ww_ctx->acquired > 0 &&
369 			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
370 		debug_mutex_wake_waiter(lock, waiter);
371 		wake_up_process(waiter->task);
372 	}
373 
374 	return true;
375 }
376 
377 /*
378  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
379  *
380  * Wound the lock holder if there are waiters with older transactions than
381  * the lock holder's. Even though multiple waiters may wound the lock holder,
382  * it is sufficient that only one of them does.
383  */
384 static bool __ww_mutex_wound(struct mutex *lock,
385 			     struct ww_acquire_ctx *ww_ctx,
386 			     struct ww_acquire_ctx *hold_ctx)
387 {
388 	struct task_struct *owner = __mutex_owner(lock);
389 
390 	lockdep_assert_held(&lock->wait_lock);
391 
392 	/*
393 	 * Possible through __ww_mutex_add_waiter() when we race with
394 	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
395 	 * through __ww_mutex_check_waiters().
396 	 */
397 	if (!hold_ctx)
398 		return false;
399 
400 	/*
401 	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
402 	 * it cannot go away because we'll have FLAG_WAITERS set and hold
403 	 * wait_lock.
404 	 */
405 	if (!owner)
406 		return false;
407 
408 	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
409 		hold_ctx->wounded = 1;
410 
411 		/*
412 		 * wake_up_process() paired with set_current_state()
413 		 * inserts sufficient barriers to make sure @owner either sees
414 		 * it's wounded in __ww_mutex_check_kill() or has a
415 		 * wakeup pending to re-read the wounded state.
416 		 */
417 		if (owner != current)
418 			wake_up_process(owner);
419 
420 		return true;
421 	}
422 
423 	return false;
424 }
425 
426 /*
427  * We just acquired @lock under @ww_ctx; if there are later contexts waiting
428  * behind us on the wait-list, check if they need to die, or wound us.
429  *
430  * See __ww_mutex_add_waiter() for the list-order construction; basically the
431  * list is ordered by stamp, smallest (oldest) first.
432  *
433  * This relies on never mixing wait-die/wound-wait on the same wait-list,
434  * which is currently ensured by that being a ww_class property.
435  *
436  * The current task must not be on the wait list.
437  */
438 static void __sched
439 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
440 {
441 	struct mutex_waiter *cur;
442 
443 	lockdep_assert_held(&lock->wait_lock);
444 
445 	list_for_each_entry(cur, &lock->wait_list, list) {
446 		if (!cur->ww_ctx)
447 			continue;
448 
449 		if (__ww_mutex_die(lock, cur, ww_ctx) ||
450 		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
451 			break;
452 	}
453 }
454 
455 /*
456  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
457  * and wake up any waiters so they can recheck.
458  */
459 static __always_inline void
460 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
461 {
462 	ww_mutex_lock_acquired(lock, ctx);
463 
464 	/*
465 	 * The lock->ctx update should be visible on all cores before
466 	 * the WAITERS check is done, otherwise contended waiters might be
467 	 * missed. A contended waiter will either see lock->ctx == NULL
468 	 * and keep spinning, or it will acquire wait_lock, add itself
469 	 * to the waiter list and sleep.
470 	 */
471 	smp_mb(); /* See comments above and below. */
472 
473 	/*
474 	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
475 	 *     MB		        MB
476 	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
477 	 *
478 	 * The memory barrier above pairs with the memory barrier in
479 	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
480 	 * and/or !empty list.
481 	 */
482 	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
483 		return;
484 
485 	/*
486 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
487 	 * die or wound us.
488 	 */
489 	raw_spin_lock(&lock->base.wait_lock);
490 	__ww_mutex_check_waiters(&lock->base, ctx);
491 	raw_spin_unlock(&lock->base.wait_lock);
492 }
493 
494 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
495 
496 /*
497  * Trylock variant that returns the owning task on failure.
498  */
499 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
500 {
501 	return __mutex_trylock_common(lock, false);
502 }
503 
504 static inline
505 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
506 			    struct mutex_waiter *waiter)
507 {
508 	struct ww_mutex *ww;
509 
510 	ww = container_of(lock, struct ww_mutex, base);
511 
512 	/*
513 	 * If ww->ctx is set, its contents are undefined; only
514 	 * by acquiring wait_lock is there a guarantee that
515 	 * they are valid when read.
516 	 *
517 	 * As such, when deadlock detection needs to be
518 	 * performed the optimistic spinning cannot be done.
519 	 *
520 	 * Check this in every inner iteration because we may
521 	 * be racing against another thread's ww_mutex_lock.
522 	 */
523 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
524 		return false;
525 
526 	/*
527 	 * If we aren't on the wait list yet, cancel the spin
528 	 * if there are waiters. We want to avoid stealing the
529 	 * lock from a waiter with an earlier stamp, since the
530 	 * other thread may already own a lock that we also
531 	 * need.
532 	 */
533 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
534 		return false;
535 
536 	/*
537 	 * Similarly, stop spinning if we are no longer the
538 	 * first waiter.
539 	 */
540 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
541 		return false;
542 
543 	return true;
544 }
545 
546 /*
547  * Look out! "owner" is an entirely speculative pointer access and not
548  * reliable.
549  *
550  * "noinline" so that this function shows up on perf profiles.
551  */
552 static noinline
553 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
554 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
555 {
556 	bool ret = true;
557 
558 	rcu_read_lock();
559 	while (__mutex_owner(lock) == owner) {
560 		/*
561 		 * Ensure we emit the owner->on_cpu dereference _after_
562 		 * checking lock->owner still matches owner. If that fails,
563 		 * owner might point to freed memory. If it still matches,
564 		 * the rcu_read_lock() ensures the memory stays valid.
565 		 */
566 		barrier();
567 
568 		/*
569 		 * Use vcpu_is_preempted() to detect lock holder preemption.
570 		 */
571 		if (!owner->on_cpu || need_resched() ||
572 				vcpu_is_preempted(task_cpu(owner))) {
573 			ret = false;
574 			break;
575 		}
576 
577 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
578 			ret = false;
579 			break;
580 		}
581 
582 		cpu_relax();
583 	}
584 	rcu_read_unlock();
585 
586 	return ret;
587 }
588 
589 /*
590  * Initial check for entering the mutex spinning loop
591  */
592 static inline int mutex_can_spin_on_owner(struct mutex *lock)
593 {
594 	struct task_struct *owner;
595 	int retval = 1;
596 
597 	if (need_resched())
598 		return 0;
599 
600 	rcu_read_lock();
601 	owner = __mutex_owner(lock);
602 
603 	/*
604 	 * To guard against lock holder preemption, skip spinning if the owner
605 	 * task is not running on a CPU or its CPU has been preempted.
606 	 */
607 	if (owner)
608 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
609 	rcu_read_unlock();
610 
611 	/*
612 	 * If lock->owner is not set, the mutex has been released. Return true
613 	 * such that we'll trylock in the spin path, which is a faster option
614 	 * than the blocking slow path.
615 	 */
616 	return retval;
617 }
618 
619 /*
620  * Optimistic spinning.
621  *
622  * We try to spin for acquisition when we find that the lock owner
623  * is currently running on a (different) CPU and while we don't
624  * need to reschedule. The rationale is that if the lock owner is
625  * running, it is likely to release the lock soon.
626  *
627  * The mutex spinners are queued up using an MCS lock so that only one
628  * spinner can compete for the mutex. However, if mutex spinning isn't
629  * going to happen, there is no point in going through the lock/unlock
630  * overhead.
631  *
632  * Returns true when the lock was taken, otherwise false, indicating
633  * that we need to jump to the slowpath and sleep.
634  *
635  * The waiter flag is set to true if the spinner is a waiter in the wait
636  * queue. The waiter-spinner will spin on the lock directly and concurrently
637  * with the spinner at the head of the OSQ, if present, until the owner is
638  * changed to itself.
639  */
640 static __always_inline bool
641 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
642 		      struct mutex_waiter *waiter)
643 {
644 	if (!waiter) {
645 		/*
646 		 * The purpose of the mutex_can_spin_on_owner() function is
647 		 * to eliminate the overhead of osq_lock() and osq_unlock()
648 		 * in case spinning isn't possible. As a waiter-spinner
649 		 * is not going to take OSQ lock anyway, there is no need
650 		 * to call mutex_can_spin_on_owner().
651 		 */
652 		if (!mutex_can_spin_on_owner(lock))
653 			goto fail;
654 
655 		/*
656 		 * In order to avoid a stampede of mutex spinners trying to
657 		 * acquire the mutex all at once, the spinners need to take an
658 		 * MCS (queued) lock first before spinning on the owner field.
659 		 */
660 		if (!osq_lock(&lock->osq))
661 			goto fail;
662 	}
663 
664 	for (;;) {
665 		struct task_struct *owner;
666 
667 		/* Try to acquire the mutex... */
668 		owner = __mutex_trylock_or_owner(lock);
669 		if (!owner)
670 			break;
671 
672 		/*
673 		 * There's an owner, wait for it to either
674 		 * release the lock or go to sleep.
675 		 */
676 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
677 			goto fail_unlock;
678 
679 		/*
680 		 * The cpu_relax() call is a compiler barrier which forces
681 		 * everything in this loop to be re-loaded. We don't need
682 		 * memory barriers as we'll eventually observe the right
683 		 * values at the cost of a few extra spins.
684 		 */
685 		cpu_relax();
686 	}
687 
688 	if (!waiter)
689 		osq_unlock(&lock->osq);
690 
691 	return true;
692 
693 
694 fail_unlock:
695 	if (!waiter)
696 		osq_unlock(&lock->osq);
697 
698 fail:
699 	/*
700 	 * If we fell out of the spin path because of need_resched(),
701 	 * reschedule now, before we try-lock the mutex. This avoids getting
702 	 * scheduled out right after we obtained the mutex.
703 	 */
704 	if (need_resched()) {
705 		/*
706 		 * We _should_ have TASK_RUNNING here, but just in case
707 		 * we do not, make it so, otherwise we might get stuck.
708 		 */
709 		__set_current_state(TASK_RUNNING);
710 		schedule_preempt_disabled();
711 	}
712 
713 	return false;
714 }
715 #else
716 static __always_inline bool
717 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
718 		      struct mutex_waiter *waiter)
719 {
720 	return false;
721 }
722 #endif
723 
724 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
725 
726 /**
727  * mutex_unlock - release the mutex
728  * @lock: the mutex to be released
729  *
730  * Unlock a mutex that has been locked by this task previously.
731  *
732  * This function must not be used in interrupt context. Unlocking
733  * a mutex that is not locked is not allowed.
734  *
735  * This function is similar to (but not equivalent to) up().
736  */
737 void __sched mutex_unlock(struct mutex *lock)
738 {
739 #ifndef CONFIG_DEBUG_LOCK_ALLOC
740 	if (__mutex_unlock_fast(lock))
741 		return;
742 #endif
743 	__mutex_unlock_slowpath(lock, _RET_IP_);
744 }
745 EXPORT_SYMBOL(mutex_unlock);
746 
747 static void __ww_mutex_unlock(struct ww_mutex *lock)
748 {
749 	/*
750 	 * Clear the context pointer and drop the context's acquired count;
751 	 * the base mutex itself is unlocked by the caller:
752 	 */
753 	if (lock->ctx) {
754 		MUTEX_WARN_ON(!lock->ctx->acquired);
755 		if (lock->ctx->acquired > 0)
756 			lock->ctx->acquired--;
757 		lock->ctx = NULL;
758 	}
759 }
760 
761 /**
762  * ww_mutex_unlock - release the w/w mutex
763  * @lock: the mutex to be released
764  *
765  * Unlock a mutex that has been locked by this task previously with any of the
766  * ww_mutex_lock* functions (with or without an acquire context). It is
767  * forbidden to release the locks after releasing the acquire context.
768  *
769  * This function must not be used in interrupt context. Unlocking
770  * an unlocked mutex is not allowed.
771  */
772 void __sched ww_mutex_unlock(struct ww_mutex *lock)
773 {
774 	__ww_mutex_unlock(lock);
775 	mutex_unlock(&lock->base);
776 }
777 EXPORT_SYMBOL(ww_mutex_unlock);
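
/*
 * Usage sketch for the ww_mutex API (illustrative only; 'my_ww_class' and
 * 'obj' are hypothetical names, and the back-off/retry logic needed when
 * taking multiple locks is omitted):
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	if (!ww_mutex_lock(&obj->lock, &ctx)) {
 *		... touch obj ...
 *		ww_mutex_unlock(&obj->lock);
 *	}
 *	ww_acquire_fini(&ctx);
 *
 * When several locks are taken under one context, a -EDEADLK return means
 * this (younger) context must release all ww_mutexes it holds, acquire the
 * contended one with ww_mutex_lock_slow() and then retry the others.
 */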
778 
779 
780 static __always_inline int __sched
781 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
782 {
783 	if (ww_ctx->acquired > 0) {
784 #ifdef CONFIG_DEBUG_MUTEXES
785 		struct ww_mutex *ww;
786 
787 		ww = container_of(lock, struct ww_mutex, base);
788 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
789 		ww_ctx->contending_lock = ww;
790 #endif
791 		return -EDEADLK;
792 	}
793 
794 	return 0;
795 }
796 
797 
798 /*
799  * Check the wound condition for the current lock acquire.
800  *
801  * Wound-Wait: If we're wounded, kill ourselves.
802  *
803  * Wait-Die: If we're trying to acquire a lock already held by an older
804  *           context, kill ourselves.
805  *
806  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
807  * look at waiters before us in the wait-list.
808  */
809 static inline int __sched
810 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
811 		      struct ww_acquire_ctx *ctx)
812 {
813 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
814 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
815 	struct mutex_waiter *cur;
816 
817 	if (ctx->acquired == 0)
818 		return 0;
819 
820 	if (!ctx->is_wait_die) {
821 		if (ctx->wounded)
822 			return __ww_mutex_kill(lock, ctx);
823 
824 		return 0;
825 	}
826 
827 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
828 		return __ww_mutex_kill(lock, ctx);
829 
830 	/*
831 	 * If there is a waiter in front of us that has a context, then its
832 	 * stamp is earlier than ours and we must kill ourselves.
833 	 */
834 	cur = waiter;
835 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
836 		if (!cur->ww_ctx)
837 			continue;
838 
839 		return __ww_mutex_kill(lock, ctx);
840 	}
841 
842 	return 0;
843 }
844 
845 /*
846  * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
847  * smallest first, so that older contexts are preferred to acquire the lock
848  * over younger contexts.
849  *
850  * Waiters without context are interspersed in FIFO order.
851  *
852  * Furthermore, for Wait-Die we kill ourselves immediately when possible
853  * (there are older contexts already waiting) to avoid unnecessary waiting,
854  * and for Wound-Wait we ensure we wound the owning context when it is younger.
855  */
856 static inline int __sched
857 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
858 		      struct mutex *lock,
859 		      struct ww_acquire_ctx *ww_ctx)
860 {
861 	struct mutex_waiter *cur;
862 	struct list_head *pos;
863 	bool is_wait_die;
864 
865 	if (!ww_ctx) {
866 		__mutex_add_waiter(lock, waiter, &lock->wait_list);
867 		return 0;
868 	}
869 
870 	is_wait_die = ww_ctx->is_wait_die;
871 
872 	/*
873 	 * Add the waiter before the first waiter with a higher stamp.
874 	 * Waiters without a context are skipped to avoid starving
875 	 * them. Wait-Die waiters may die here. Wound-Wait waiters
876 	 * never die here, but they are sorted in stamp order and
877 	 * may wound the lock holder.
878 	 */
879 	pos = &lock->wait_list;
880 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
881 		if (!cur->ww_ctx)
882 			continue;
883 
884 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
885 			/*
886 			 * Wait-Die: if we find an older context waiting, there
887 			 * is no point in queueing behind it, as we'd have to
888 			 * die the moment it would acquire the lock.
889 			 */
890 			if (is_wait_die) {
891 				int ret = __ww_mutex_kill(lock, ww_ctx);
892 
893 				if (ret)
894 					return ret;
895 			}
896 
897 			break;
898 		}
899 
900 		pos = &cur->list;
901 
902 		/* Wait-Die: ensure younger waiters die. */
903 		__ww_mutex_die(lock, cur, ww_ctx);
904 	}
905 
906 	__mutex_add_waiter(lock, waiter, pos);
907 
908 	/*
909 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
910 	 * wound it so that we might proceed.
911 	 */
912 	if (!is_wait_die) {
913 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
914 
915 		/*
916 		 * See ww_mutex_set_context_fastpath(). Orders setting
917 		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
918 		 * such that either we or the fastpath will wound @ww->ctx.
919 		 */
920 		smp_mb();
921 		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
922 	}
923 
924 	return 0;
925 }
926 
927 /*
928  * Lock a mutex (possibly interruptible), slowpath:
929  */
930 static __always_inline int __sched
931 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
932 		    struct lockdep_map *nest_lock, unsigned long ip,
933 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
934 {
935 	struct mutex_waiter waiter;
936 	struct ww_mutex *ww;
937 	int ret;
938 
939 	if (!use_ww_ctx)
940 		ww_ctx = NULL;
941 
942 	might_sleep();
943 
944 	MUTEX_WARN_ON(lock->magic != lock);
945 
946 	ww = container_of(lock, struct ww_mutex, base);
947 	if (ww_ctx) {
948 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
949 			return -EALREADY;
950 
951 		/*
952 		 * Reset the wounded flag after a kill. No other process can
953 		 * race and wound us here since they can't have a valid owner
954 		 * pointer if we don't have any locks held.
955 		 */
956 		if (ww_ctx->acquired == 0)
957 			ww_ctx->wounded = 0;
958 
959 #ifdef CONFIG_DEBUG_LOCK_ALLOC
960 		nest_lock = &ww_ctx->dep_map;
961 #endif
962 	}
963 
964 	preempt_disable();
965 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
966 
967 	if (__mutex_trylock(lock) ||
968 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
969 		/* got the lock, yay! */
970 		lock_acquired(&lock->dep_map, ip);
971 		if (ww_ctx)
972 			ww_mutex_set_context_fastpath(ww, ww_ctx);
973 		preempt_enable();
974 		return 0;
975 	}
976 
977 	raw_spin_lock(&lock->wait_lock);
978 	/*
979 	 * After waiting to acquire the wait_lock, try again.
980 	 */
981 	if (__mutex_trylock(lock)) {
982 		if (ww_ctx)
983 			__ww_mutex_check_waiters(lock, ww_ctx);
984 
985 		goto skip_wait;
986 	}
987 
988 	debug_mutex_lock_common(lock, &waiter);
989 	waiter.task = current;
990 	if (ww_ctx)
991 		waiter.ww_ctx = ww_ctx;
992 
993 	lock_contended(&lock->dep_map, ip);
994 
995 	if (!use_ww_ctx) {
996 		/* add waiting tasks to the end of the waitqueue (FIFO): */
997 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
998 	} else {
999 		/*
1000 		 * Add in stamp order, waking up waiters that must kill
1001 		 * themselves.
1002 		 */
1003 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
1004 		if (ret)
1005 			goto err_early_kill;
1006 	}
1007 
1008 	set_current_state(state);
1009 	for (;;) {
1010 		bool first;
1011 
1012 		/*
1013 		 * Once we hold wait_lock, we're serialized against
1014 		 * mutex_unlock() handing the lock off to us; do a trylock
1015 		 * before testing the error conditions to make sure we pick up
1016 		 * the handoff.
1017 		 */
1018 		if (__mutex_trylock(lock))
1019 			goto acquired;
1020 
1021 		/*
1022 		 * Check for signals and kill conditions while holding
1023 		 * wait_lock. This ensures the lock cancellation is ordered
1024 		 * against mutex_unlock() and wake-ups do not go missing.
1025 		 */
1026 		if (signal_pending_state(state, current)) {
1027 			ret = -EINTR;
1028 			goto err;
1029 		}
1030 
1031 		if (ww_ctx) {
1032 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1033 			if (ret)
1034 				goto err;
1035 		}
1036 
1037 		raw_spin_unlock(&lock->wait_lock);
1038 		schedule_preempt_disabled();
1039 
1040 		first = __mutex_waiter_is_first(lock, &waiter);
1041 
1042 		set_current_state(state);
1043 		/*
1044 		 * Here we order against unlock; we must either see it change
1045 		 * state back to RUNNING and fall through the next schedule(),
1046 		 * or we must see its unlock and acquire.
1047 		 */
1048 		if (__mutex_trylock_or_handoff(lock, first) ||
1049 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
1050 			break;
1051 
1052 		raw_spin_lock(&lock->wait_lock);
1053 	}
1054 	raw_spin_lock(&lock->wait_lock);
1055 acquired:
1056 	__set_current_state(TASK_RUNNING);
1057 
1058 	if (ww_ctx) {
1059 		/*
1060 		 * Wound-Wait; we stole the lock (!first_waiter), check the
1061 		 * waiters as anyone might want to wound us.
1062 		 */
1063 		if (!ww_ctx->is_wait_die &&
1064 		    !__mutex_waiter_is_first(lock, &waiter))
1065 			__ww_mutex_check_waiters(lock, ww_ctx);
1066 	}
1067 
1068 	__mutex_remove_waiter(lock, &waiter);
1069 
1070 	debug_mutex_free_waiter(&waiter);
1071 
1072 skip_wait:
1073 	/* got the lock - cleanup and rejoice! */
1074 	lock_acquired(&lock->dep_map, ip);
1075 
1076 	if (ww_ctx)
1077 		ww_mutex_lock_acquired(ww, ww_ctx);
1078 
1079 	raw_spin_unlock(&lock->wait_lock);
1080 	preempt_enable();
1081 	return 0;
1082 
1083 err:
1084 	__set_current_state(TASK_RUNNING);
1085 	__mutex_remove_waiter(lock, &waiter);
1086 err_early_kill:
1087 	raw_spin_unlock(&lock->wait_lock);
1088 	debug_mutex_free_waiter(&waiter);
1089 	mutex_release(&lock->dep_map, ip);
1090 	preempt_enable();
1091 	return ret;
1092 }
1093 
1094 static int __sched
1095 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
1096 	     struct lockdep_map *nest_lock, unsigned long ip)
1097 {
1098 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1099 }
1100 
1101 static int __sched
1102 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
1103 		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
1104 {
1105 	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
1106 }
1107 
1108 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1109 void __sched
1110 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1111 {
1112 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1113 }
1114 
1115 EXPORT_SYMBOL_GPL(mutex_lock_nested);
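
/*
 * Illustrative example (not part of this file; 'parent' and 'child' are
 * hypothetical objects whose locks share one lock class): when two mutexes
 * of the same lockdep class must legitimately be held at once, the subclass
 * tells lockdep the second acquisition is intentional rather than recursive:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */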
1116 
1117 void __sched
1118 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1119 {
1120 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1121 }
1122 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1123 
1124 int __sched
1125 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1126 {
1127 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1128 }
1129 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1130 
1131 int __sched
1132 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1133 {
1134 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1135 }
1136 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1137 
1138 void __sched
1139 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1140 {
1141 	int token;
1142 
1143 	might_sleep();
1144 
1145 	token = io_schedule_prepare();
1146 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1147 			    subclass, NULL, _RET_IP_, NULL, 0);
1148 	io_schedule_finish(token);
1149 }
1150 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1151 
1152 static inline int
1153 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1154 {
1155 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1156 	unsigned tmp;
1157 
1158 	if (ctx->deadlock_inject_countdown-- == 0) {
1159 		tmp = ctx->deadlock_inject_interval;
1160 		if (tmp > UINT_MAX/4)
1161 			tmp = UINT_MAX;
1162 		else
1163 			tmp = tmp*2 + tmp + tmp/2;
1164 
1165 		ctx->deadlock_inject_interval = tmp;
1166 		ctx->deadlock_inject_countdown = tmp;
1167 		ctx->contending_lock = lock;
1168 
1169 		ww_mutex_unlock(lock);
1170 
1171 		return -EDEADLK;
1172 	}
1173 #endif
1174 
1175 	return 0;
1176 }
1177 
1178 int __sched
1179 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1180 {
1181 	int ret;
1182 
1183 	might_sleep();
1184 	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1185 			       0, _RET_IP_, ctx);
1186 	if (!ret && ctx && ctx->acquired > 1)
1187 		return ww_mutex_deadlock_injection(lock, ctx);
1188 
1189 	return ret;
1190 }
1191 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1192 
1193 int __sched
1194 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1195 {
1196 	int ret;
1197 
1198 	might_sleep();
1199 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1200 			      0, _RET_IP_, ctx);
1201 
1202 	if (!ret && ctx && ctx->acquired > 1)
1203 		return ww_mutex_deadlock_injection(lock, ctx);
1204 
1205 	return ret;
1206 }
1207 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1208 
1209 #endif
1210 
1211 /*
1212  * Release the lock, slowpath:
1213  */
1214 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1215 {
1216 	struct task_struct *next = NULL;
1217 	DEFINE_WAKE_Q(wake_q);
1218 	unsigned long owner;
1219 
1220 	mutex_release(&lock->dep_map, ip);
1221 
1222 	/*
1223 	 * Release the lock before (potentially) taking the spinlock such that
1224 	 * other contenders can get on with things ASAP.
1225 	 *
1226 	 * Except when HANDOFF, in that case we must not clear the owner field,
1227 	 * but instead set it to the top waiter.
1228 	 */
1229 	owner = atomic_long_read(&lock->owner);
1230 	for (;;) {
1231 		MUTEX_WARN_ON(__owner_task(owner) != current);
1232 		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1233 
1234 		if (owner & MUTEX_FLAG_HANDOFF)
1235 			break;
1236 
1237 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
1238 			if (owner & MUTEX_FLAG_WAITERS)
1239 				break;
1240 
1241 			return;
1242 		}
1243 	}
1244 
1245 	raw_spin_lock(&lock->wait_lock);
1246 	debug_mutex_unlock(lock);
1247 	if (!list_empty(&lock->wait_list)) {
1248 		/* get the first entry from the wait-list: */
1249 		struct mutex_waiter *waiter =
1250 			list_first_entry(&lock->wait_list,
1251 					 struct mutex_waiter, list);
1252 
1253 		next = waiter->task;
1254 
1255 		debug_mutex_wake_waiter(lock, waiter);
1256 		wake_q_add(&wake_q, next);
1257 	}
1258 
1259 	if (owner & MUTEX_FLAG_HANDOFF)
1260 		__mutex_handoff(lock, next);
1261 
1262 	raw_spin_unlock(&lock->wait_lock);
1263 
1264 	wake_up_q(&wake_q);
1265 }
1266 
1267 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1268 /*
1269  * Here come the less common (and hence less performance-critical) APIs:
1270  * mutex_lock_interruptible() and mutex_trylock().
1271  */
1272 static noinline int __sched
1273 __mutex_lock_killable_slowpath(struct mutex *lock);
1274 
1275 static noinline int __sched
1276 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1277 
1278 /**
1279  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1280  * @lock: The mutex to be acquired.
1281  *
1282  * Lock the mutex like mutex_lock().  If a signal is delivered while the
1283  * process is sleeping, this function will return without acquiring the
1284  * mutex.
1285  *
1286  * Context: Process context.
1287  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1288  * signal arrived.
1289  */
1290 int __sched mutex_lock_interruptible(struct mutex *lock)
1291 {
1292 	might_sleep();
1293 
1294 	if (__mutex_trylock_fast(lock))
1295 		return 0;
1296 
1297 	return __mutex_lock_interruptible_slowpath(lock);
1298 }
1299 
1300 EXPORT_SYMBOL(mutex_lock_interruptible);
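
/*
 * Usage sketch (illustrative only; 'my_lock' is a hypothetical mutex): the
 * error return must be propagated or handled, since the lock is not held on
 * that path:
 *
 *	int ret;
 *
 *	ret = mutex_lock_interruptible(&my_lock);
 *	if (ret)
 *		return ret;
 *	...
 *	mutex_unlock(&my_lock);
 */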
1301 
1302 /**
1303  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1304  * @lock: The mutex to be acquired.
1305  *
1306  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
1307  * the current process is delivered while the process is sleeping, this
1308  * function will return without acquiring the mutex.
1309  *
1310  * Context: Process context.
1311  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1312  * fatal signal arrived.
1313  */
1314 int __sched mutex_lock_killable(struct mutex *lock)
1315 {
1316 	might_sleep();
1317 
1318 	if (__mutex_trylock_fast(lock))
1319 		return 0;
1320 
1321 	return __mutex_lock_killable_slowpath(lock);
1322 }
1323 EXPORT_SYMBOL(mutex_lock_killable);
1324 
1325 /**
1326  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1327  * @lock: The mutex to be acquired.
1328  *
1329  * Lock the mutex like mutex_lock().  While the task is waiting for this
1330  * mutex, it will be accounted as being in the IO wait state by the
1331  * scheduler.
1332  *
1333  * Context: Process context.
1334  */
1335 void __sched mutex_lock_io(struct mutex *lock)
1336 {
1337 	int token;
1338 
1339 	token = io_schedule_prepare();
1340 	mutex_lock(lock);
1341 	io_schedule_finish(token);
1342 }
1343 EXPORT_SYMBOL_GPL(mutex_lock_io);
1344 
1345 static noinline void __sched
1346 __mutex_lock_slowpath(struct mutex *lock)
1347 {
1348 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1349 }
1350 
1351 static noinline int __sched
1352 __mutex_lock_killable_slowpath(struct mutex *lock)
1353 {
1354 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1355 }
1356 
1357 static noinline int __sched
1358 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1359 {
1360 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1361 }
1362 
1363 static noinline int __sched
1364 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1365 {
1366 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1367 			       _RET_IP_, ctx);
1368 }
1369 
1370 static noinline int __sched
1371 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1372 					    struct ww_acquire_ctx *ctx)
1373 {
1374 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1375 			       _RET_IP_, ctx);
1376 }
1377 
1378 #endif
1379 
1380 /**
1381  * mutex_trylock - try to acquire the mutex, without waiting
1382  * @lock: the mutex to be acquired
1383  *
1384  * Try to acquire the mutex atomically. Returns 1 if the mutex
1385  * has been acquired successfully, and 0 on contention.
1386  *
1387  * NOTE: this function follows the spin_trylock() convention, so
1388  * it is negated from the down_trylock() return values! Be careful
1389  * about this when converting semaphore users to mutexes.
1390  *
1391  * This function must not be used in interrupt context. The
1392  * mutex must be released by the same task that acquired it.
1393  */
1394 int __sched mutex_trylock(struct mutex *lock)
1395 {
1396 	bool locked;
1397 
1398 	MUTEX_WARN_ON(lock->magic != lock);
1399 
1400 	locked = __mutex_trylock(lock);
1401 	if (locked)
1402 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1403 
1404 	return locked;
1405 }
1406 EXPORT_SYMBOL(mutex_trylock);
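
/*
 * Usage sketch (illustrative only; 'my_lock' is hypothetical). Note the
 * spin_trylock()-style return value: nonzero means the lock is now held:
 *
 *	if (mutex_trylock(&my_lock)) {
 *		... fast path, lock held ...
 *		mutex_unlock(&my_lock);
 *	} else {
 *		... contended, fall back without the lock ...
 *	}
 */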
1407 
1408 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1409 int __sched
1410 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1411 {
1412 	might_sleep();
1413 
1414 	if (__mutex_trylock_fast(&lock->base)) {
1415 		if (ctx)
1416 			ww_mutex_set_context_fastpath(lock, ctx);
1417 		return 0;
1418 	}
1419 
1420 	return __ww_mutex_lock_slowpath(lock, ctx);
1421 }
1422 EXPORT_SYMBOL(ww_mutex_lock);
1423 
1424 int __sched
1425 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1426 {
1427 	might_sleep();
1428 
1429 	if (__mutex_trylock_fast(&lock->base)) {
1430 		if (ctx)
1431 			ww_mutex_set_context_fastpath(lock, ctx);
1432 		return 0;
1433 	}
1434 
1435 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1436 }
1437 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1438 
1439 #endif
1440 
1441 /**
1442  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1443  * @cnt: the atomic counter to decrement
1444  * @lock: the mutex to return holding if we dec to 0
1445  *
1446  * Return 1 and hold @lock if the decrement reaches 0; return 0 otherwise.
1447  */
1448 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1449 {
1450 	/* dec if we can't possibly hit 0 */
1451 	if (atomic_add_unless(cnt, -1, 1))
1452 		return 0;
1453 	/* we might hit 0, so take the lock */
1454 	mutex_lock(lock);
1455 	if (!atomic_dec_and_test(cnt)) {
1456 		/* when we actually did the dec, we didn't hit 0 */
1457 		mutex_unlock(lock);
1458 		return 0;
1459 	}
1460 	/* we hit 0, and we hold the lock */
1461 	return 1;
1462 }
1463 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
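
/*
 * Usage sketch (illustrative only; 'obj', its 'refcnt' and 'obj_list_lock'
 * are hypothetical): the typical pattern drops the last reference and tears
 * the object down while holding the mutex that protects lookups:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */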
1464