/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
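
/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * consumers just include <linux/spinlock.h> and use the spin_*() APIs built
 * at the end of the chain above; the UP/SMP and per-arch layering stays
 * internal. The names my_list_lock/my_list/my_add are made up for the example.
 *
 *	#include <linux/spinlock.h>
 *	#include <linux/list.h>
 *
 *	static DEFINE_SPINLOCK(my_list_lock);
 *	static LIST_HEAD(my_list);
 *
 *	static void my_add(struct list_head *node)
 *	{
 *		spin_lock(&my_list_lock);
 *		list_add(node, &my_list);
 *		spin_unlock(&my_list_lock);
 *	}
 */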

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * These must be defined before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
	".subsection 1\n\t"                     \
	extra                                   \
	".ifndef " LOCK_SECTION_NAME "\n\t"     \
	LOCK_SECTION_NAME ":\n\t"               \
	".endif\n"

#define LOCK_SECTION_END                        \
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
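
/*
 * Dynamic initialization sketch (illustrative, 'struct my_obj' is made up):
 * with CONFIG_DEBUG_SPINLOCK the macro above plants one static
 * lock_class_key per raw_spin_lock_init() call site, which lockdep (when
 * enabled) uses to tell apart locks that are initialized at different
 * places even if they live in objects of the same type.
 *
 *	struct my_obj {
 *		raw_spinlock_t lock;
 *	};
 *
 *	static void my_obj_setup(struct my_obj *obj)
 *	{
 *		raw_spin_lock_init(&obj->lock);
 *	}
 */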

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * This barrier must provide two things:
 *
 *   - it must guarantee that a STORE before the spin_lock() is ordered
 *     against a LOAD after it, see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 * CPU0			CPU1			CPU2
 *
 *			for (;;) {
 *			  if (READ_ONCE(X))
 *			    break;
 *			}
 * X=1
 *			<sched-out>
 *						<sched-in>
 *						r = X;
 *
 * Without transitivity it could be that CPU1 observes X!=0 and breaks out of
 * the loop, the task gets migrated to CPU2, and yet CPU2 still sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
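
/*
 * Illustrative pattern only (names are made up; the real users live at the
 * two usage sites referenced above, e.g. in the wakeup path): a STORE done
 * before taking the lock must be ordered against a LOAD done inside the
 * critical section, which plain ACQUIRE semantics do not guarantee.
 *
 *	WRITE_ONCE(t->my_cond, 1);		// STORE before the lock
 *	raw_spin_lock(&my_lock);
 *	smp_mb__after_spinlock();		// upgrade ACQUIRE to a full barrier
 *	if (READ_ONCE(t->my_state) == MY_WAITING)	// LOAD after the lock
 *		my_do_wakeup(t);
 *	raw_spin_unlock(&my_lock);
 */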

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set. The
 * various methods are defined as no-ops when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
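
/*
 * Nesting sketch (illustrative names): when two locks of the same lockdep
 * class must be held at once, the inner acquisition is annotated with a
 * non-zero subclass so CONFIG_DEBUG_LOCK_ALLOC does not report it as a
 * self-deadlock. Callers must still guarantee a consistent locking order.
 *
 *	static void my_move(struct my_queue *dst, struct my_queue *src)
 *	{
 *		raw_spin_lock(&dst->lock);
 *		raw_spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		raw_spin_unlock(&src->lock);
 *		raw_spin_unlock(&dst->lock);
 *	}
 */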

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
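
/*
 * Trylock sketch (illustrative names): raw_spin_trylock_irqsave() only keeps
 * interrupts disabled while the lock is actually held; on failure it restores
 * the previous interrupt state and evaluates to 0, so the caller can fall
 * back to a slow path instead of spinning. 'flags' must be unsigned long.
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		my_fast_path();
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	} else {
 *		my_defer_work();
 *	}
 */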

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

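/*
 * spinlock_check() acts as a compile-time type check: the wrappers and
 * macros below pass their argument through it, so handing them anything
 * other than a spinlock_t * fails to build. It simply returns &lock->rlock,
 * the raw lock that the raw_spin_*() primitives operate on.
 */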
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
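
/*
 * Initialization sketch (illustrative names): a lock embedded in a
 * dynamically allocated object must be initialized with spin_lock_init()
 * before first use; statically allocated locks can use DEFINE_SPINLOCK().
 *
 *	struct my_dev {
 *		spinlock_t lock;
 *	};
 *
 *	static struct my_dev *my_dev_alloc(gfp_t gfp)
 *	{
 *		struct my_dev *dev = kzalloc(sizeof(*dev), gfp);
 *
 *		if (dev)
 *			spin_lock_init(&dev->lock);
 *		return dev;
 *	}
 */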

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
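
/*
 * Assertion sketch (illustrative names): a helper that relies on its caller
 * already holding a lock can make that precondition explicit with
 * assert_spin_locked().
 *
 *	static void my_dev_update_locked(struct my_dev *dev)
 *	{
 *		assert_spin_locked(&dev->lock);
 *		dev->counter++;
 *	}
 */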

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs the above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
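
/*
 * Refcounting sketch (illustrative names): the classic use is dropping the
 * last reference to an object that sits on a locked list, taking the lock
 * only when the count actually reaches zero.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_lock(&obj->refcnt, &my_list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(&my_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */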

#endif /* __LINUX_SPINLOCK_H */