xref: /linux-6.15/include/linux/rtmutex.h (revision 6d41c675)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * RT Mutexes: blocking mutual exclusion locks with PI support
4  *
5  * started by Ingo Molnar and Thomas Gleixner:
6  *
7  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
8  *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <[email protected]>
9  *
10  * This file contains the public data structure and API definitions.
11  */
12 
13 #ifndef __LINUX_RT_MUTEX_H
14 #define __LINUX_RT_MUTEX_H
15 
16 #include <linux/linkage.h>
17 #include <linux/rbtree.h>
18 #include <linux/spinlock_types.h>
19 
20 extern int max_lock_depth; /* for sysctl */
21 
/**
 * struct rt_mutex - the basic rt_mutex structure
 *
 * @wait_lock:	spinlock to protect the structure
 * @waiters:	rbtree root to enqueue waiters in priority order;
 *              caches top-waiter (leftmost node).
 * @owner:	the mutex owner
 */
struct rt_mutex {
	raw_spinlock_t		wait_lock;	/* protects @waiters and @owner */
	struct rb_root_cached   waiters;	/* priority-ordered; leftmost = top waiter */
	struct task_struct	*owner;		/* NULL when unlocked (see rt_mutex_is_locked()) */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* lock tracking, CONFIG_DEBUG_LOCK_ALLOC only */
#endif
};
38 
39 struct rt_mutex_waiter;
40 struct hrtimer_sleeper;
41 
#ifdef CONFIG_DEBUG_RT_MUTEXES
 extern int rt_mutex_debug_check_no_locks_freed(const void *from,
						unsigned long len);
 extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
#else
/*
 * Without CONFIG_DEBUG_RT_MUTEXES the debug checks compile away: the
 * freed-memory check trivially succeeds (returns 0) and the held-locks
 * check becomes an empty statement.
 */
 static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
						       unsigned long len)
 {
	return 0;
 }
# define rt_mutex_debug_check_no_locks_held(task)	do { } while (0)
#endif
54 
#ifdef CONFIG_DEBUG_RT_MUTEXES

/*
 * Debug build: give each rt_mutex_init() call site its own static
 * lock_class_key and pass the enclosing function's name, so the mutex
 * is individually identifiable in lock debugging output.
 */
# define rt_mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
	__rt_mutex_init(mutex, __func__, &__key); \
} while (0)

 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
/* Non-debug build: no name, no class key, and task teardown is a no-op. */
# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL, NULL)
# define rt_mutex_debug_task_free(t)			do { } while (0)
#endif
68 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Appends a .dep_map initializer (named after the mutex variable) when
 * lockdep is enabled; expands to nothing otherwise.  Note the leading
 * comma is part of the expansion.
 */
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
	, .dep_map = { .name = #mutexname }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif

/* Static initializer: unlocked (.owner == NULL) with an empty waiter tree. */
#define __RT_MUTEX_INITIALIZER(mutexname) \
	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
	, .waiters = RB_ROOT_CACHED \
	, .owner = NULL \
	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}

/* Define and statically initialize an rt_mutex in the unlocked state. */
#define DEFINE_RT_MUTEX(mutexname) \
	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
84 
85 /**
86  * rt_mutex_is_locked - is the mutex locked
87  * @lock: the mutex to be queried
88  *
89  * Returns 1 if the mutex is locked, 0 if unlocked.
90  */
91 static inline int rt_mutex_is_locked(struct rt_mutex *lock)
92 {
93 	return lock->owner != NULL;
94 }
95 
/* Low-level init used by rt_mutex_init(); @name/@key may be NULL (non-debug). */
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * With lockdep enabled the nested variant (carrying a subclass) is the
 * real entry point and rt_mutex_lock() maps to subclass 0; without it
 * the subclass argument is simply discarded.
 */
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#endif

extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);

extern void rt_mutex_unlock(struct rt_mutex *lock);
111 
112 #endif
113