xref: /linux-6.15/include/linux/spinlock_rt.h (revision 8282947f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #ifndef __LINUX_SPINLOCK_RT_H
3 #define __LINUX_SPINLOCK_RT_H
4 
5 #ifndef __LINUX_SPINLOCK_H
6 #error Do not include directly. Use spinlock.h
7 #endif
8 
/*
 * Lockdep initialization helper for RT spinlocks. With
 * CONFIG_DEBUG_LOCK_ALLOC the out-of-line variant registers @name and
 * @key with lockdep; otherwise it compiles away to an empty stub.
 */
9 #ifdef CONFIG_DEBUG_LOCK_ALLOC
10 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
11 				struct lock_class_key *key);
12 #else
13 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
14 				       struct lock_class_key *key)
15 {
16 }
17 #endif
18 
/*
 * Initialize an RT spinlock: set up the underlying rt_mutex base and
 * register a static (per call site) lockdep class key via
 * __rt_spin_lock_init(). #slock stringifies the lock for lockdep output.
 */
19 #define spin_lock_init(slock)				\
20 do {							\
21 	static struct lock_class_key __key;		\
22 							\
23 	rt_mutex_base_init(&(slock)->lock);		\
24 	__rt_spin_lock_init(slock, #slock, &__key);	\
25 } while (0)
26 
/* Out-of-line RT substitution functions backing the spin_*() API below. */
27 extern void rt_spin_lock(spinlock_t *lock);
28 extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
29 extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
30 extern void rt_spin_unlock(spinlock_t *lock);
31 extern void rt_spin_lock_unlock(spinlock_t *lock);
32 extern int rt_spin_trylock_bh(spinlock_t *lock);
33 extern int rt_spin_trylock(spinlock_t *lock);
34 
/* Acquire @lock; on RT this is a sleeping rt_mutex based lock. */
35 static __always_inline void spin_lock(spinlock_t *lock)
36 {
37 	rt_spin_lock(lock);
38 }
39 
/*
 * Nested-locking variants. With CONFIG_LOCKDEP they forward the subclass
 * or nest_lock dep_map to the rt_spin_lock_nest*() helpers; without it
 * they degrade to plain spin_lock() while still evaluating the extra
 * argument (see comment in the #else branch).
 */
40 #ifdef CONFIG_LOCKDEP
41 # define __spin_lock_nested(lock, subclass)				\
42 	rt_spin_lock_nested(lock, subclass)
43 
44 # define __spin_lock_nest_lock(lock, nest_lock)				\
45 	do {								\
46 		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
47 		rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
48 	} while (0)
/*
 * irqsave_nested: interrupts are not actually disabled on RT; @flags is
 * only typechecked and zeroed so callers' restore paths stay valid.
 */
49 # define __spin_lock_irqsave_nested(lock, flags, subclass)	\
50 	do {							\
51 		typecheck(unsigned long, flags);		\
52 		flags = 0;					\
53 		__spin_lock_nested(lock, subclass);		\
54 	} while (0)
55 
56 #else
57  /*
58   * Always evaluate the 'subclass' argument to avoid that the compiler
59   * warns about set-but-not-used variables when building with
60   * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
61   */
62 # define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
63 # define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
64 # define __spin_lock_irqsave_nested(lock, flags, subclass)	\
65 	spin_lock_irqsave(((void)(subclass), (lock)), flags)
66 #endif
67 
/* Public nested-lock API, mapped onto the CONFIG_LOCKDEP-dependent helpers. */
68 #define spin_lock_nested(lock, subclass)		\
69 	__spin_lock_nested(lock, subclass)
70 
71 #define spin_lock_nest_lock(lock, nest_lock)		\
72 	__spin_lock_nest_lock(lock, nest_lock)
73 
74 #define spin_lock_irqsave_nested(lock, flags, subclass)	\
75 	__spin_lock_irqsave_nested(lock, flags, subclass)
76 
/*
 * Acquire @lock with bottom halves disabled. BH stays disabled across a
 * potential sleep in rt_spin_lock(); the original open question about
 * dropping BH while blocking is preserved below.
 */
77 static __always_inline void spin_lock_bh(spinlock_t *lock)
78 {
79 	/* Investigate: Drop bh when blocking ? */
80 	local_bh_disable();
81 	rt_spin_lock(lock);
82 }
83 
/*
 * On RT interrupts are deliberately NOT disabled: the lock can sleep,
 * so this is just a plain lock acquisition.
 */
84 static __always_inline void spin_lock_irq(spinlock_t *lock)
85 {
86 	rt_spin_lock(lock);
87 }
88 
/*
 * RT variant: interrupts are not disabled and no flags are saved.
 * @flags is typechecked and set to 0 so the matching
 * spin_unlock_irqrestore() call sites remain source compatible.
 */
89 #define spin_lock_irqsave(lock, flags)			 \
90 	do {						 \
91 		typecheck(unsigned long, flags);	 \
92 		flags = 0;				 \
93 		spin_lock(lock);			 \
94 	} while (0)
95 
/* Release @lock. */
96 static __always_inline void spin_unlock(spinlock_t *lock)
97 {
98 	rt_spin_unlock(lock);
99 }
100 
/* Release @lock, then re-enable bottom halves (pairs with spin_lock_bh()). */
101 static __always_inline void spin_unlock_bh(spinlock_t *lock)
102 {
103 	rt_spin_unlock(lock);
104 	local_bh_enable();
105 }
106 
/* Release @lock; interrupts were never disabled on RT, so nothing to enable. */
107 static __always_inline void spin_unlock_irq(spinlock_t *lock)
108 {
109 	rt_spin_unlock(lock);
110 }
111 
/*
 * Release @lock; @flags is ignored because spin_lock_irqsave() on RT
 * never saved an interrupt state (it stored 0).
 */
112 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
113 						   unsigned long flags)
114 {
115 	rt_spin_unlock(lock);
116 }
117 
/*
 * Trylock variants. __cond_lock() is a sparse annotation telling static
 * analysis the lock is held only when the expression is nonzero; at
 * runtime it evaluates to the rt_spin_trylock*() result. The _irq and
 * _irqsave forms do not touch interrupt state on RT (see above).
 */
118 #define spin_trylock(lock)				\
119 	__cond_lock(lock, rt_spin_trylock(lock))
120 
121 #define spin_trylock_bh(lock)				\
122 	__cond_lock(lock, rt_spin_trylock_bh(lock))
123 
124 #define spin_trylock_irq(lock)				\
125 	__cond_lock(lock, rt_spin_trylock(lock))
126 
/*
 * Statement expression returning the trylock result; @flags is
 * typechecked and zeroed for source compatibility with non-RT callers.
 */
127 #define __spin_trylock_irqsave(lock, flags)		\
128 ({							\
129 	int __locked;					\
130 							\
131 	typecheck(unsigned long, flags);		\
132 	flags = 0;					\
133 	__locked = spin_trylock(lock);			\
134 	__locked;					\
135 })
136 
137 #define spin_trylock_irqsave(lock, flags)		\
138 	__cond_lock(lock, __spin_trylock_irqsave(lock, flags))
139 
/* Contention state is not exposed on RT; evaluate @lock, report "not contended". */
140 #define spin_is_contended(lock)		(((void)(lock), 0))
141 
/* Nonzero when the underlying rt_mutex base is currently owned. */
142 static inline int spin_is_locked(spinlock_t *lock)
143 {
144 	return rt_mutex_base_is_locked(&lock->lock);
145 }
146 
147 #define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
148 
149 #include <linux/rwlock_rt.h>
150 
151 #endif
152