xref: /linux-6.15/include/linux/rtmutex.h (revision 199cacd1)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * RT Mutexes: blocking mutual exclusion locks with PI support
4  *
5  * started by Ingo Molnar and Thomas Gleixner:
6  *
7  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
8  *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
9  *
10  * This file contains the public data structure and API definitions.
11  */
12 
13 #ifndef __LINUX_RT_MUTEX_H
14 #define __LINUX_RT_MUTEX_H
15 
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
19 
20 extern int max_lock_depth; /* for sysctl */
21 
22 /**
23  * The rt_mutex structure
24  *
25  * @wait_lock:	spinlock to protect the structure
26  * @waiters:	rbtree root to enqueue waiters in priority order;
27  *              caches top-waiter (leftmost node).
28  * @owner:	the mutex owner
29  */
30 struct rt_mutex {
31 	raw_spinlock_t		wait_lock;
32 	struct rb_root_cached   waiters;
33 	struct task_struct	*owner;
34 #ifdef CONFIG_DEBUG_LOCK_ALLOC
35 	struct lockdep_map	dep_map;
36 #endif
37 };
38 
39 struct rt_mutex_waiter;
40 struct hrtimer_sleeper;
41 
/*
 * Debug hooks: real implementations live in kernel/locking when
 * CONFIG_DEBUG_RT_MUTEXES is enabled; otherwise they compile away to
 * no-op stubs so callers need no #ifdefs of their own.
 */
#ifdef CONFIG_DEBUG_RT_MUTEXES
 extern int rt_mutex_debug_check_no_locks_freed(const void *from,
						unsigned long len);
 extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
 /* Stub: unconditionally reports "no locks freed in [from, from+len)". */
 static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
						       unsigned long len)
 {
	return 0;
 }
# define rt_mutex_debug_check_no_locks_held(task)	do { } while (0)
# define rt_mutex_debug_task_free(t)			do { } while (0)
#endif
56 
/**
 * rt_mutex_init - initialize an rt_mutex at runtime
 * @mutex: pointer to the rt_mutex to initialize
 *
 * The static lock_class_key gives every expansion site its own key,
 * so lockdep assigns each initialization point a distinct lock class.
 */
#define rt_mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
	__rt_mutex_init(mutex, __func__, &__key); \
} while (0)
62 
/*
 * Static-initializer counterpart of rt_mutex_init(): with lockdep
 * enabled the dep_map is seeded with the lock's name, otherwise the
 * helper expands to nothing.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
	, .dep_map = { .name = #mutexname }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif

/* Compile-time initializer: unlocked, empty waiter tree, no owner. */
#define __RT_MUTEX_INITIALIZER(mutexname) \
	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
	, .waiters = RB_ROOT_CACHED \
	, .owner = NULL \
	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}

/* Define and statically initialize an unlocked rt_mutex. */
#define DEFINE_RT_MUTEX(mutexname) \
	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
78 
79 /**
80  * rt_mutex_is_locked - is the mutex locked
81  * @lock: the mutex to be queried
82  *
83  * Returns 1 if the mutex is locked, 0 if unlocked.
84  */
85 static inline int rt_mutex_is_locked(struct rt_mutex *lock)
86 {
87 	return lock->owner != NULL;
88 }
89 
/* Low-level init; use the rt_mutex_init() wrapper, which supplies the key. */
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);

/*
 * With lockdep, rt_mutex_lock() is routed through the _nested variant so
 * a subclass can be recorded; without it, the subclass is simply dropped.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#endif

/* Sleeping acquire, interruptible by signals: 0 on success, -EINTR style
 * negative errno on failure. */
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
/* Non-sleeping attempt: returns 1 on acquire, 0 on contention. */
extern int rt_mutex_trylock(struct rt_mutex *lock);

extern void rt_mutex_unlock(struct rt_mutex *lock);
105 
106 #endif
107